/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
#include "intel_guc_submission.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(info, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}
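/*
 * Sum the GGTT footprint of an object: a single buffer can be bound into
 * the global GTT through several VMAs (e.g. a normal view plus a rotated
 * or partial view for display), so walk them all and count only nodes
 * that actually hold an allocation.
 */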
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	for_each_ggtt_vma(vma, obj) {
		if (drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");

		/* Chop off the trailing ", " */
		buf[x - 2] = '\0';

		return buf;
	}
}

static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
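/*
 * sort() comparator used by i915_gem_stolen_list_info() below: order
 * objects by the start of their stolen-memory node so the listing comes
 * out in address order rather than list order.
 */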
"*" : ""); 207 seq_puts(m, ")"); 208 } 209 if (obj->stolen) 210 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 211 212 engine = i915_gem_object_last_write_engine(obj); 213 if (engine) 214 seq_printf(m, " (%s)", engine->name); 215 216 frontbuffer_bits = atomic_read(&obj->frontbuffer_bits); 217 if (frontbuffer_bits) 218 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits); 219 } 220 221 static int obj_rank_by_stolen(const void *A, const void *B) 222 { 223 const struct drm_i915_gem_object *a = 224 *(const struct drm_i915_gem_object **)A; 225 const struct drm_i915_gem_object *b = 226 *(const struct drm_i915_gem_object **)B; 227 228 if (a->stolen->start < b->stolen->start) 229 return -1; 230 if (a->stolen->start > b->stolen->start) 231 return 1; 232 return 0; 233 } 234 235 static int i915_gem_stolen_list_info(struct seq_file *m, void *data) 236 { 237 struct drm_i915_private *dev_priv = node_to_i915(m->private); 238 struct drm_device *dev = &dev_priv->drm; 239 struct drm_i915_gem_object **objects; 240 struct drm_i915_gem_object *obj; 241 u64 total_obj_size, total_gtt_size; 242 unsigned long total, count, n; 243 int ret; 244 245 total = READ_ONCE(dev_priv->mm.object_count); 246 objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL); 247 if (!objects) 248 return -ENOMEM; 249 250 ret = mutex_lock_interruptible(&dev->struct_mutex); 251 if (ret) 252 goto out; 253 254 total_obj_size = total_gtt_size = count = 0; 255 256 spin_lock(&dev_priv->mm.obj_lock); 257 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) { 258 if (count == total) 259 break; 260 261 if (obj->stolen == NULL) 262 continue; 263 264 objects[count++] = obj; 265 total_obj_size += obj->base.size; 266 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 267 268 } 269 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) { 270 if (count == total) 271 break; 272 273 if (obj->stolen == NULL) 274 continue; 275 276 objects[count++] = obj; 277 total_obj_size += obj->base.size; 278 } 279 spin_unlock(&dev_priv->mm.obj_lock); 280 281 sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL); 282 283 seq_puts(m, "Stolen:\n"); 284 for (n = 0; n < count; n++) { 285 seq_puts(m, " "); 286 describe_obj(m, objects[n]); 287 seq_putc(m, '\n'); 288 } 289 seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n", 290 count, total_obj_size, total_gtt_size); 291 292 mutex_unlock(&dev->struct_mutex); 293 out: 294 kvfree(objects); 295 return ret; 296 } 297 298 struct file_stats { 299 struct drm_i915_file_private *file_priv; 300 unsigned long count; 301 u64 total, unbound; 302 u64 global, shared; 303 u64 active, inactive; 304 }; 305 306 static int per_file_stats(int id, void *ptr, void *data) 307 { 308 struct drm_i915_gem_object *obj = ptr; 309 struct file_stats *stats = data; 310 struct i915_vma *vma; 311 312 lockdep_assert_held(&obj->base.dev->struct_mutex); 313 314 stats->count++; 315 stats->total += obj->base.size; 316 if (!obj->bind_count) 317 stats->unbound += obj->base.size; 318 if (obj->base.name || obj->base.dma_buf) 319 stats->shared += obj->base.size; 320 321 list_for_each_entry(vma, &obj->vma_list, obj_link) { 322 if (!drm_mm_node_allocated(&vma->node)) 323 continue; 324 325 if (i915_vma_is_ggtt(vma)) { 326 stats->global += vma->node.size; 327 } else { 328 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm); 329 330 if (ppgtt->base.file != stats->file_priv) 331 continue; 332 } 333 334 if (i915_vma_is_active(vma)) 335 stats->active += vma->node.size; 336 else 337 stats->inactive += vma->node.size; 
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}

static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;

		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
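/*
 * Top-level GEM accounting: unbound/bound/purgeable/mapped/huge/display
 * totals followed by per-client statistics. With a typical debugfs
 * layout (minor 0, debugfs mounted) this is read as a plain file, e.g.:
 *
 *	cat /sys/kernel/debug/dri/0/i915_gem_objects
 */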
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->base.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
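/*
 * Dump every object on the bound list. The list itself is only guarded
 * by a spinlock, so snapshot the object pointers into a local array
 * first and do the (sleeping) describe_obj() pass afterwards under
 * struct_mutex.
 */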
task->comm : "<unknown>", stats); 545 rcu_read_unlock(); 546 547 mutex_unlock(&dev->struct_mutex); 548 } 549 mutex_unlock(&dev->filelist_mutex); 550 551 return 0; 552 } 553 554 static int i915_gem_gtt_info(struct seq_file *m, void *data) 555 { 556 struct drm_info_node *node = m->private; 557 struct drm_i915_private *dev_priv = node_to_i915(node); 558 struct drm_device *dev = &dev_priv->drm; 559 struct drm_i915_gem_object **objects; 560 struct drm_i915_gem_object *obj; 561 u64 total_obj_size, total_gtt_size; 562 unsigned long nobject, n; 563 int count, ret; 564 565 nobject = READ_ONCE(dev_priv->mm.object_count); 566 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL); 567 if (!objects) 568 return -ENOMEM; 569 570 ret = mutex_lock_interruptible(&dev->struct_mutex); 571 if (ret) 572 return ret; 573 574 count = 0; 575 spin_lock(&dev_priv->mm.obj_lock); 576 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) { 577 objects[count++] = obj; 578 if (count == nobject) 579 break; 580 } 581 spin_unlock(&dev_priv->mm.obj_lock); 582 583 total_obj_size = total_gtt_size = 0; 584 for (n = 0; n < count; n++) { 585 obj = objects[n]; 586 587 seq_puts(m, " "); 588 describe_obj(m, obj); 589 seq_putc(m, '\n'); 590 total_obj_size += obj->base.size; 591 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 592 } 593 594 mutex_unlock(&dev->struct_mutex); 595 596 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 597 count, total_obj_size, total_gtt_size); 598 kvfree(objects); 599 600 return 0; 601 } 602 603 static int i915_gem_batch_pool_info(struct seq_file *m, void *data) 604 { 605 struct drm_i915_private *dev_priv = node_to_i915(m->private); 606 struct drm_device *dev = &dev_priv->drm; 607 struct drm_i915_gem_object *obj; 608 struct intel_engine_cs *engine; 609 enum intel_engine_id id; 610 int total = 0; 611 int ret, j; 612 613 ret = mutex_lock_interruptible(&dev->struct_mutex); 614 if (ret) 615 return ret; 616 617 for_each_engine(engine, dev_priv, id) { 618 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { 619 int count; 620 621 count = 0; 622 list_for_each_entry(obj, 623 &engine->batch_pool.cache_list[j], 624 batch_pool_link) 625 count++; 626 seq_printf(m, "%s cache[%d]: %d objects\n", 627 engine->name, j, count); 628 629 list_for_each_entry(obj, 630 &engine->batch_pool.cache_list[j], 631 batch_pool_link) { 632 seq_puts(m, " "); 633 describe_obj(m, obj); 634 seq_putc(m, '\n'); 635 } 636 637 total += count; 638 } 639 } 640 641 seq_printf(m, "total: %d\n", total); 642 643 mutex_unlock(&dev->struct_mutex); 644 645 return 0; 646 } 647 648 static int i915_interrupt_info(struct seq_file *m, void *data) 649 { 650 struct drm_i915_private *dev_priv = node_to_i915(m->private); 651 struct intel_engine_cs *engine; 652 enum intel_engine_id id; 653 int i, pipe; 654 655 intel_runtime_pm_get(dev_priv); 656 657 if (IS_CHERRYVIEW(dev_priv)) { 658 seq_printf(m, "Master Interrupt Control:\t%08x\n", 659 I915_READ(GEN8_MASTER_IRQ)); 660 661 seq_printf(m, "Display IER:\t%08x\n", 662 I915_READ(VLV_IER)); 663 seq_printf(m, "Display IIR:\t%08x\n", 664 I915_READ(VLV_IIR)); 665 seq_printf(m, "Display IIR_RW:\t%08x\n", 666 I915_READ(VLV_IIR_RW)); 667 seq_printf(m, "Display IMR:\t%08x\n", 668 I915_READ(VLV_IMR)); 669 for_each_pipe(dev_priv, pipe) { 670 enum intel_display_power_domain power_domain; 671 672 power_domain = POWER_DOMAIN_PIPE(pipe); 673 if (!intel_display_power_get_if_enabled(dev_priv, 674 power_domain)) { 675 seq_printf(m, "Pipe %c power disabled\n", 676 pipe_name(pipe)); 677 
continue; 678 } 679 680 seq_printf(m, "Pipe %c stat:\t%08x\n", 681 pipe_name(pipe), 682 I915_READ(PIPESTAT(pipe))); 683 684 intel_display_power_put(dev_priv, power_domain); 685 } 686 687 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 688 seq_printf(m, "Port hotplug:\t%08x\n", 689 I915_READ(PORT_HOTPLUG_EN)); 690 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 691 I915_READ(VLV_DPFLIPSTAT)); 692 seq_printf(m, "DPINVGTT:\t%08x\n", 693 I915_READ(DPINVGTT)); 694 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 695 696 for (i = 0; i < 4; i++) { 697 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 698 i, I915_READ(GEN8_GT_IMR(i))); 699 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 700 i, I915_READ(GEN8_GT_IIR(i))); 701 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 702 i, I915_READ(GEN8_GT_IER(i))); 703 } 704 705 seq_printf(m, "PCU interrupt mask:\t%08x\n", 706 I915_READ(GEN8_PCU_IMR)); 707 seq_printf(m, "PCU interrupt identity:\t%08x\n", 708 I915_READ(GEN8_PCU_IIR)); 709 seq_printf(m, "PCU interrupt enable:\t%08x\n", 710 I915_READ(GEN8_PCU_IER)); 711 } else if (INTEL_GEN(dev_priv) >= 8) { 712 seq_printf(m, "Master Interrupt Control:\t%08x\n", 713 I915_READ(GEN8_MASTER_IRQ)); 714 715 for (i = 0; i < 4; i++) { 716 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 717 i, I915_READ(GEN8_GT_IMR(i))); 718 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 719 i, I915_READ(GEN8_GT_IIR(i))); 720 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 721 i, I915_READ(GEN8_GT_IER(i))); 722 } 723 724 for_each_pipe(dev_priv, pipe) { 725 enum intel_display_power_domain power_domain; 726 727 power_domain = POWER_DOMAIN_PIPE(pipe); 728 if (!intel_display_power_get_if_enabled(dev_priv, 729 power_domain)) { 730 seq_printf(m, "Pipe %c power disabled\n", 731 pipe_name(pipe)); 732 continue; 733 } 734 seq_printf(m, "Pipe %c IMR:\t%08x\n", 735 pipe_name(pipe), 736 I915_READ(GEN8_DE_PIPE_IMR(pipe))); 737 seq_printf(m, "Pipe %c IIR:\t%08x\n", 738 pipe_name(pipe), 739 I915_READ(GEN8_DE_PIPE_IIR(pipe))); 740 seq_printf(m, "Pipe %c IER:\t%08x\n", 741 pipe_name(pipe), 742 I915_READ(GEN8_DE_PIPE_IER(pipe))); 743 744 intel_display_power_put(dev_priv, power_domain); 745 } 746 747 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 748 I915_READ(GEN8_DE_PORT_IMR)); 749 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 750 I915_READ(GEN8_DE_PORT_IIR)); 751 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 752 I915_READ(GEN8_DE_PORT_IER)); 753 754 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 755 I915_READ(GEN8_DE_MISC_IMR)); 756 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 757 I915_READ(GEN8_DE_MISC_IIR)); 758 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 759 I915_READ(GEN8_DE_MISC_IER)); 760 761 seq_printf(m, "PCU interrupt mask:\t%08x\n", 762 I915_READ(GEN8_PCU_IMR)); 763 seq_printf(m, "PCU interrupt identity:\t%08x\n", 764 I915_READ(GEN8_PCU_IIR)); 765 seq_printf(m, "PCU interrupt enable:\t%08x\n", 766 I915_READ(GEN8_PCU_IER)); 767 } else if (IS_VALLEYVIEW(dev_priv)) { 768 seq_printf(m, "Display IER:\t%08x\n", 769 I915_READ(VLV_IER)); 770 seq_printf(m, "Display IIR:\t%08x\n", 771 I915_READ(VLV_IIR)); 772 seq_printf(m, "Display IIR_RW:\t%08x\n", 773 I915_READ(VLV_IIR_RW)); 774 seq_printf(m, "Display IMR:\t%08x\n", 775 I915_READ(VLV_IMR)); 776 for_each_pipe(dev_priv, pipe) { 777 enum intel_display_power_domain power_domain; 778 779 power_domain = POWER_DOMAIN_PIPE(pipe); 780 if (!intel_display_power_get_if_enabled(dev_priv, 781 power_domain)) { 782 
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}
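/*
 * i915_gpu_info and i915_error_state share gpu_state_read() above; they
 * differ only in how the i915_gpu_state reference stashed in
 * file->private_data is obtained (a fresh capture vs. the first recorded
 * error).
 */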
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;

	intel_runtime_pm_get(i915);
	gpu = i915_capture_gpu_state(i915);
	intel_runtime_pm_put(i915);
	if (!gpu)
		return -ENOMEM;

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_gem_set_global_seqno(dev, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");
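/*
 * RPS/turbo state. Ironlake decodes MEMSWCTL/MEMSTAT, Valleyview and
 * Cherryview query the punit, and Gen6+ dumps the full RP autotuning
 * register set.
 */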
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
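/*
 * Pretty-print an intel_instdone snapshot; SC_INSTDONE only exists on
 * Gen4+ and the per-slice/subslice breakdown on Gen7+.
 */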
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}
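/*
 * Hangcheck status: per-engine seqno progress, outstanding waiters, and
 * the instdone snapshots hangcheck uses to tell a busy render engine
 * from a stuck one.
 */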
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   engine->timeline->inflight_seqnos);
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");
			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");
			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}

static int i915_reset_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s = %u\n", engine->name,
			   i915_reset_engine_count(error, engine));
	}

	return 0;
}
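/*
 * DRPC (render C-state) reporting for Ironlake: decode MEMMODECTL and
 * RSTDBYCTL into the current render-standby state.
 */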
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RS3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   i915->uncore.user_forcewake.count);

	for_each_fw_domain(fw_domain, i915, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}
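/*
 * Gen6+ variant of the DRPC report. Note the dance at the top: taking
 * forcewake would itself keep the GT out of RC6 and falsify the answer,
 * so the status register is read raw after waiting for any outstanding
 * forcewake ack to clear.
 */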
"Up" : "Down"); 1451 1452 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6); 1453 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6); 1454 1455 return i915_forcewake_domains(m, NULL); 1456 } 1457 1458 static int gen6_drpc_info(struct seq_file *m) 1459 { 1460 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1461 u32 gt_core_status, rcctl1, rc6vids = 0; 1462 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0; 1463 unsigned forcewake_count; 1464 int count = 0; 1465 1466 forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count); 1467 if (forcewake_count) { 1468 seq_puts(m, "RC information inaccurate because somebody " 1469 "holds a forcewake reference \n"); 1470 } else { 1471 /* NB: we cannot use forcewake, else we read the wrong values */ 1472 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1473 udelay(10); 1474 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1475 } 1476 1477 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS); 1478 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1479 1480 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1481 if (INTEL_GEN(dev_priv) >= 9) { 1482 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE); 1483 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS); 1484 } 1485 1486 mutex_lock(&dev_priv->pcu_lock); 1487 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1488 mutex_unlock(&dev_priv->pcu_lock); 1489 1490 seq_printf(m, "RC1e Enabled: %s\n", 1491 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1492 seq_printf(m, "RC6 Enabled: %s\n", 1493 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1494 if (INTEL_GEN(dev_priv) >= 9) { 1495 seq_printf(m, "Render Well Gating Enabled: %s\n", 1496 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE)); 1497 seq_printf(m, "Media Well Gating Enabled: %s\n", 1498 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE)); 1499 } 1500 seq_printf(m, "Deep RC6 Enabled: %s\n", 1501 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1502 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1503 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1504 seq_puts(m, "Current RC state: "); 1505 switch (gt_core_status & GEN6_RCn_MASK) { 1506 case GEN6_RC0: 1507 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1508 seq_puts(m, "Core Power Down\n"); 1509 else 1510 seq_puts(m, "on\n"); 1511 break; 1512 case GEN6_RC3: 1513 seq_puts(m, "RC3\n"); 1514 break; 1515 case GEN6_RC6: 1516 seq_puts(m, "RC6\n"); 1517 break; 1518 case GEN6_RC7: 1519 seq_puts(m, "RC7\n"); 1520 break; 1521 default: 1522 seq_puts(m, "Unknown\n"); 1523 break; 1524 } 1525 1526 seq_printf(m, "Core Power Down: %s\n", 1527 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1528 if (INTEL_GEN(dev_priv) >= 9) { 1529 seq_printf(m, "Render Power Well: %s\n", 1530 (gen9_powergate_status & 1531 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down"); 1532 seq_printf(m, "Media Power Well: %s\n", 1533 (gen9_powergate_status & 1534 GEN9_PWRGT_MEDIA_STATUS_MASK) ? 
"Up" : "Down"); 1535 } 1536 1537 /* Not exactly sure what this is */ 1538 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:", 1539 GEN6_GT_GFX_RC6_LOCKED); 1540 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6); 1541 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p); 1542 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp); 1543 1544 seq_printf(m, "RC6 voltage: %dmV\n", 1545 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1546 seq_printf(m, "RC6+ voltage: %dmV\n", 1547 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1548 seq_printf(m, "RC6++ voltage: %dmV\n", 1549 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1550 return i915_forcewake_domains(m, NULL); 1551 } 1552 1553 static int i915_drpc_info(struct seq_file *m, void *unused) 1554 { 1555 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1556 int err; 1557 1558 intel_runtime_pm_get(dev_priv); 1559 1560 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1561 err = vlv_drpc_info(m); 1562 else if (INTEL_GEN(dev_priv) >= 6) 1563 err = gen6_drpc_info(m); 1564 else 1565 err = ironlake_drpc_info(m); 1566 1567 intel_runtime_pm_put(dev_priv); 1568 1569 return err; 1570 } 1571 1572 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) 1573 { 1574 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1575 1576 seq_printf(m, "FB tracking busy bits: 0x%08x\n", 1577 dev_priv->fb_tracking.busy_bits); 1578 1579 seq_printf(m, "FB tracking flip bits: 0x%08x\n", 1580 dev_priv->fb_tracking.flip_bits); 1581 1582 return 0; 1583 } 1584 1585 static int i915_fbc_status(struct seq_file *m, void *unused) 1586 { 1587 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1588 struct intel_fbc *fbc = &dev_priv->fbc; 1589 1590 if (!HAS_FBC(dev_priv)) 1591 return -ENODEV; 1592 1593 intel_runtime_pm_get(dev_priv); 1594 mutex_lock(&fbc->lock); 1595 1596 if (intel_fbc_is_active(dev_priv)) 1597 seq_puts(m, "FBC enabled\n"); 1598 else 1599 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); 1600 1601 if (fbc->work.scheduled) 1602 seq_printf(m, "FBC worker scheduled on vblank %u, now %llu\n", 1603 fbc->work.scheduled_vblank, 1604 drm_crtc_vblank_count(&fbc->crtc->base)); 1605 1606 if (intel_fbc_is_active(dev_priv)) { 1607 u32 mask; 1608 1609 if (INTEL_GEN(dev_priv) >= 8) 1610 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK; 1611 else if (INTEL_GEN(dev_priv) >= 7) 1612 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK; 1613 else if (INTEL_GEN(dev_priv) >= 5) 1614 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK; 1615 else if (IS_G4X(dev_priv)) 1616 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK; 1617 else 1618 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING | 1619 FBC_STAT_COMPRESSED); 1620 1621 seq_printf(m, "Compressing: %s\n", yesno(mask)); 1622 } 1623 1624 mutex_unlock(&fbc->lock); 1625 intel_runtime_pm_put(dev_priv); 1626 1627 return 0; 1628 } 1629 1630 static int i915_fbc_false_color_get(void *data, u64 *val) 1631 { 1632 struct drm_i915_private *dev_priv = data; 1633 1634 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 1635 return -ENODEV; 1636 1637 *val = dev_priv->fbc.false_color; 1638 1639 return 0; 1640 } 1641 1642 static int i915_fbc_false_color_set(void *data, u64 val) 1643 { 1644 struct drm_i915_private *dev_priv = data; 1645 u32 reg; 1646 1647 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 1648 return -ENODEV; 1649 1650 mutex_lock(&dev_priv->fbc.lock); 1651 1652 reg = I915_READ(ILK_DPFC_CONTROL); 1653 
static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
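/*
 * On LLC platforms the PCU keeps a table mapping each GPU frequency to
 * the minimum IA and ring frequencies it will request alongside it; dump
 * one row per GPU frequency step. GEN9_FREQ_SCALER accounts for SKL+
 * expressing frequencies in 16.66 MHz rather than 50 MHz units.
 */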
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
		max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = rps->min_freq_softlimit;
		max_gpu_freq = rps->max_freq_softlimit;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      IS_CANNONLAKE(dev_priv) ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

out:
	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, fbdev_fb->obj);
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
		   ring->space, ring->head, ring->tail);
}
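/*
 * Walk every HW context, naming the owning process where one can still
 * be resolved, then describe each engine's context state object and
 * ringbuffer.
 */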
1887 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1888 struct drm_device *dev = &dev_priv->drm; 1889 struct intel_engine_cs *engine; 1890 struct i915_gem_context *ctx; 1891 enum intel_engine_id id; 1892 int ret; 1893 1894 ret = mutex_lock_interruptible(&dev->struct_mutex); 1895 if (ret) 1896 return ret; 1897 1898 list_for_each_entry(ctx, &dev_priv->contexts.list, link) { 1899 seq_printf(m, "HW context %u ", ctx->hw_id); 1900 if (ctx->pid) { 1901 struct task_struct *task; 1902 1903 task = get_pid_task(ctx->pid, PIDTYPE_PID); 1904 if (task) { 1905 seq_printf(m, "(%s [%d]) ", 1906 task->comm, task->pid); 1907 put_task_struct(task); 1908 } 1909 } else if (IS_ERR(ctx->file_priv)) { 1910 seq_puts(m, "(deleted) "); 1911 } else { 1912 seq_puts(m, "(kernel) "); 1913 } 1914 1915 seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 1916 seq_putc(m, '\n'); 1917 1918 for_each_engine(engine, dev_priv, id) { 1919 struct intel_context *ce = &ctx->engine[engine->id]; 1920 1921 seq_printf(m, "%s: ", engine->name); 1922 if (ce->state) 1923 describe_obj(m, ce->state->obj); 1924 if (ce->ring) 1925 describe_ctx_ring(m, ce->ring); 1926 seq_putc(m, '\n'); 1927 } 1928 1929 seq_putc(m, '\n'); 1930 } 1931 1932 mutex_unlock(&dev->struct_mutex); 1933 1934 return 0; 1935 } 1936 1937 static const char *swizzle_string(unsigned swizzle) 1938 { 1939 switch (swizzle) { 1940 case I915_BIT_6_SWIZZLE_NONE: 1941 return "none"; 1942 case I915_BIT_6_SWIZZLE_9: 1943 return "bit9"; 1944 case I915_BIT_6_SWIZZLE_9_10: 1945 return "bit9/bit10"; 1946 case I915_BIT_6_SWIZZLE_9_11: 1947 return "bit9/bit11"; 1948 case I915_BIT_6_SWIZZLE_9_10_11: 1949 return "bit9/bit10/bit11"; 1950 case I915_BIT_6_SWIZZLE_9_17: 1951 return "bit9/bit17"; 1952 case I915_BIT_6_SWIZZLE_9_10_17: 1953 return "bit9/bit10/bit17"; 1954 case I915_BIT_6_SWIZZLE_UNKNOWN: 1955 return "unknown"; 1956 } 1957 1958 return "bug"; 1959 } 1960 1961 static int i915_swizzle_info(struct seq_file *m, void *data) 1962 { 1963 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1964 1965 intel_runtime_pm_get(dev_priv); 1966 1967 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1968 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1969 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1970 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1971 1972 if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) { 1973 seq_printf(m, "DCC = 0x%08x\n", 1974 I915_READ(DCC)); 1975 seq_printf(m, "DCC2 = 0x%08x\n", 1976 I915_READ(DCC2)); 1977 seq_printf(m, "C0DRB3 = 0x%04x\n", 1978 I915_READ16(C0DRB3)); 1979 seq_printf(m, "C1DRB3 = 0x%04x\n", 1980 I915_READ16(C1DRB3)); 1981 } else if (INTEL_GEN(dev_priv) >= 6) { 1982 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1983 I915_READ(MAD_DIMM_C0)); 1984 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1985 I915_READ(MAD_DIMM_C1)); 1986 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1987 I915_READ(MAD_DIMM_C2)); 1988 seq_printf(m, "TILECTL = 0x%08x\n", 1989 I915_READ(TILECTL)); 1990 if (INTEL_GEN(dev_priv) >= 8) 1991 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 1992 I915_READ(GAMTARBMODE)); 1993 else 1994 seq_printf(m, "ARB_MODE = 0x%08x\n", 1995 I915_READ(ARB_MODE)); 1996 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1997 I915_READ(DISP_ARB_CTL)); 1998 } 1999 2000 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2001 seq_puts(m, "L-shaped memory detected\n"); 2002 2003 intel_runtime_pm_put(dev_priv); 2004 2005 return 0; 2006 } 2007 2008 static int per_file_ctx(int id, void *ptr, void *data) 2009 { 2010 struct i915_gem_context *ctx = ptr; 2011 struct seq_file *m = data; 2012 struct
i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2013 2014 if (!ppgtt) { 2015 seq_printf(m, " no ppgtt for context %d\n", 2016 ctx->user_handle); 2017 return 0; 2018 } 2019 2020 if (i915_gem_context_is_default(ctx)) 2021 seq_puts(m, " default context:\n"); 2022 else 2023 seq_printf(m, " context %d:\n", ctx->user_handle); 2024 ppgtt->debug_dump(ppgtt, m); 2025 2026 return 0; 2027 } 2028 2029 static void gen8_ppgtt_info(struct seq_file *m, 2030 struct drm_i915_private *dev_priv) 2031 { 2032 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2033 struct intel_engine_cs *engine; 2034 enum intel_engine_id id; 2035 int i; 2036 2037 if (!ppgtt) 2038 return; 2039 2040 for_each_engine(engine, dev_priv, id) { 2041 seq_printf(m, "%s\n", engine->name); 2042 for (i = 0; i < 4; i++) { 2043 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i)); 2044 pdp <<= 32; 2045 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i)); 2046 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2047 } 2048 } 2049 } 2050 2051 static void gen6_ppgtt_info(struct seq_file *m, 2052 struct drm_i915_private *dev_priv) 2053 { 2054 struct intel_engine_cs *engine; 2055 enum intel_engine_id id; 2056 2057 if (IS_GEN6(dev_priv)) 2058 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2059 2060 for_each_engine(engine, dev_priv, id) { 2061 seq_printf(m, "%s\n", engine->name); 2062 if (IS_GEN7(dev_priv)) 2063 seq_printf(m, "GFX_MODE: 0x%08x\n", 2064 I915_READ(RING_MODE_GEN7(engine))); 2065 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", 2066 I915_READ(RING_PP_DIR_BASE(engine))); 2067 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", 2068 I915_READ(RING_PP_DIR_BASE_READ(engine))); 2069 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", 2070 I915_READ(RING_PP_DIR_DCLV(engine))); 2071 } 2072 if (dev_priv->mm.aliasing_ppgtt) { 2073 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2074 2075 seq_puts(m, "aliasing PPGTT:\n"); 2076 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); 2077 2078 ppgtt->debug_dump(ppgtt, m); 2079 } 2080 2081 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2082 } 2083 2084 static int i915_ppgtt_info(struct seq_file *m, void *data) 2085 { 2086 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2087 struct drm_device *dev = &dev_priv->drm; 2088 struct drm_file *file; 2089 int ret; 2090 2091 mutex_lock(&dev->filelist_mutex); 2092 ret = mutex_lock_interruptible(&dev->struct_mutex); 2093 if (ret) 2094 goto out_unlock; 2095 2096 intel_runtime_pm_get(dev_priv); 2097 2098 if (INTEL_GEN(dev_priv) >= 8) 2099 gen8_ppgtt_info(m, dev_priv); 2100 else if (INTEL_GEN(dev_priv) >= 6) 2101 gen6_ppgtt_info(m, dev_priv); 2102 2103 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2104 struct drm_i915_file_private *file_priv = file->driver_priv; 2105 struct task_struct *task; 2106 2107 task = get_pid_task(file->pid, PIDTYPE_PID); 2108 if (!task) { 2109 ret = -ESRCH; 2110 goto out_rpm; 2111 } 2112 seq_printf(m, "\nproc: %s\n", task->comm); 2113 put_task_struct(task); 2114 idr_for_each(&file_priv->context_idr, per_file_ctx, 2115 (void *)(unsigned long)m); 2116 } 2117 2118 out_rpm: 2119 intel_runtime_pm_put(dev_priv); 2120 mutex_unlock(&dev->struct_mutex); 2121 out_unlock: 2122 mutex_unlock(&dev->filelist_mutex); 2123 return ret; 2124 } 2125 2126 static int count_irq_waiters(struct drm_i915_private *i915) 2127 { 2128 struct intel_engine_cs *engine; 2129 enum intel_engine_id id; 2130 int count = 0; 2131 2132 for_each_engine(engine, i915, id) 2133 count += intel_engine_has_waiter(engine); 2134 2135 return count; 2136 } 2137 2138 
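/* Map the RPS autotuning power mode (LOW_POWER/BETWEEN/HIGH_POWER) onto a label for the "RPS Autotuning" block printed by i915_rps_boost_info() below, e.g. the header line: RPS Autotuning (current "high power" window): */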
static const char *rps_power_to_str(unsigned int power) 2139 { 2140 static const char * const strings[] = { 2141 [LOW_POWER] = "low power", 2142 [BETWEEN] = "mixed", 2143 [HIGH_POWER] = "high power", 2144 }; 2145 2146 if (power >= ARRAY_SIZE(strings) || !strings[power]) 2147 return "unknown"; 2148 2149 return strings[power]; 2150 } 2151 2152 static int i915_rps_boost_info(struct seq_file *m, void *data) 2153 { 2154 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2155 struct drm_device *dev = &dev_priv->drm; 2156 struct intel_rps *rps = &dev_priv->gt_pm.rps; 2157 struct drm_file *file; 2158 2159 seq_printf(m, "RPS enabled? %d\n", rps->enabled); 2160 seq_printf(m, "GPU busy? %s [%d requests]\n", 2161 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests); 2162 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); 2163 seq_printf(m, "Boosts outstanding? %d\n", 2164 atomic_read(&rps->num_waiters)); 2165 seq_printf(m, "Frequency requested %d\n", 2166 intel_gpu_freq(dev_priv, rps->cur_freq)); 2167 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2168 intel_gpu_freq(dev_priv, rps->min_freq), 2169 intel_gpu_freq(dev_priv, rps->min_freq_softlimit), 2170 intel_gpu_freq(dev_priv, rps->max_freq_softlimit), 2171 intel_gpu_freq(dev_priv, rps->max_freq)); 2172 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", 2173 intel_gpu_freq(dev_priv, rps->idle_freq), 2174 intel_gpu_freq(dev_priv, rps->efficient_freq), 2175 intel_gpu_freq(dev_priv, rps->boost_freq)); 2176 2177 mutex_lock(&dev->filelist_mutex); 2178 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2179 struct drm_i915_file_private *file_priv = file->driver_priv; 2180 struct task_struct *task; 2181 2182 rcu_read_lock(); 2183 task = pid_task(file->pid, PIDTYPE_PID); 2184 seq_printf(m, "%s [%d]: %d boosts\n", 2185 task ? task->comm : "<unknown>", 2186 task ? task->pid : -1, 2187 atomic_read(&file_priv->rps_client.boosts)); 2188 rcu_read_unlock(); 2189 } 2190 seq_printf(m, "Kernel (anonymous) boosts: %d\n", 2191 atomic_read(&rps->boosts)); 2192 mutex_unlock(&dev->filelist_mutex); 2193 2194 if (INTEL_GEN(dev_priv) >= 6 && 2195 rps->enabled && 2196 dev_priv->gt.active_requests) { 2197 u32 rpup, rpupei; 2198 u32 rpdown, rpdownei; 2199 2200 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2201 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; 2202 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; 2203 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; 2204 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; 2205 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2206 2207 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", 2208 rps_power_to_str(rps->power)); 2209 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", 2210 rpup && rpupei ? 100 * rpup / rpupei : 0, 2211 rps->up_threshold); 2212 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", 2213 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0, 2214 rps->down_threshold); 2215 } else { 2216 seq_puts(m, "\nRPS Autotuning inactive\n"); 2217 } 2218 2219 return 0; 2220 } 2221 2222 static int i915_llc(struct seq_file *m, void *data) 2223 { 2224 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2225 const bool edram = INTEL_GEN(dev_priv) > 8; 2226 2227 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv))); 2228 seq_printf(m, "%s: %lluMB\n", edram ? 
"eDRAM" : "eLLC", 2229 intel_uncore_edram_size(dev_priv)/1024/1024); 2230 2231 return 0; 2232 } 2233 2234 static int i915_huc_load_status_info(struct seq_file *m, void *data) 2235 { 2236 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2237 struct drm_printer p; 2238 2239 if (!HAS_HUC(dev_priv)) 2240 return -ENODEV; 2241 2242 p = drm_seq_file_printer(m); 2243 intel_uc_fw_dump(&dev_priv->huc.fw, &p); 2244 2245 intel_runtime_pm_get(dev_priv); 2246 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); 2247 intel_runtime_pm_put(dev_priv); 2248 2249 return 0; 2250 } 2251 2252 static int i915_guc_load_status_info(struct seq_file *m, void *data) 2253 { 2254 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2255 struct drm_printer p; 2256 u32 tmp, i; 2257 2258 if (!HAS_GUC(dev_priv)) 2259 return -ENODEV; 2260 2261 p = drm_seq_file_printer(m); 2262 intel_uc_fw_dump(&dev_priv->guc.fw, &p); 2263 2264 intel_runtime_pm_get(dev_priv); 2265 2266 tmp = I915_READ(GUC_STATUS); 2267 2268 seq_printf(m, "\nGuC status 0x%08x:\n", tmp); 2269 seq_printf(m, "\tBootrom status = 0x%x\n", 2270 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); 2271 seq_printf(m, "\tuKernel status = 0x%x\n", 2272 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); 2273 seq_printf(m, "\tMIA Core status = 0x%x\n", 2274 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); 2275 seq_puts(m, "\nScratch registers:\n"); 2276 for (i = 0; i < 16; i++) 2277 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); 2278 2279 intel_runtime_pm_put(dev_priv); 2280 2281 return 0; 2282 } 2283 2284 static void i915_guc_log_info(struct seq_file *m, 2285 struct drm_i915_private *dev_priv) 2286 { 2287 struct intel_guc *guc = &dev_priv->guc; 2288 2289 seq_puts(m, "\nGuC logging stats:\n"); 2290 2291 seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n", 2292 guc->log.flush_count[GUC_ISR_LOG_BUFFER], 2293 guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]); 2294 2295 seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n", 2296 guc->log.flush_count[GUC_DPC_LOG_BUFFER], 2297 guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]); 2298 2299 seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n", 2300 guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER], 2301 guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]); 2302 2303 seq_printf(m, "\tTotal flush interrupt count: %u\n", 2304 guc->log.flush_interrupt_count); 2305 2306 seq_printf(m, "\tCapture miss count: %u\n", 2307 guc->log.capture_miss_count); 2308 } 2309 2310 static void i915_guc_client_info(struct seq_file *m, 2311 struct drm_i915_private *dev_priv, 2312 struct intel_guc_client *client) 2313 { 2314 struct intel_engine_cs *engine; 2315 enum intel_engine_id id; 2316 uint64_t tot = 0; 2317 2318 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", 2319 client->priority, client->stage_id, client->proc_desc_offset); 2320 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n", 2321 client->doorbell_id, client->doorbell_offset); 2322 2323 for_each_engine(engine, dev_priv, id) { 2324 u64 submissions = client->submissions[id]; 2325 tot += submissions; 2326 seq_printf(m, "\tSubmissions: %llu %s\n", 2327 submissions, engine->name); 2328 } 2329 seq_printf(m, "\tTotal: %llu\n", tot); 2330 } 2331 2332 static int i915_guc_info(struct seq_file *m, void *data) 2333 { 2334 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2335 const struct intel_guc *guc = &dev_priv->guc; 2336 2337 if (!USES_GUC_SUBMISSION(dev_priv)) 2338 return -ENODEV; 2339 2340 
GEM_BUG_ON(!guc->execbuf_client); 2341 GEM_BUG_ON(!guc->preempt_client); 2342 2343 seq_printf(m, "Doorbell map:\n"); 2344 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap); 2345 seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline); 2346 2347 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client); 2348 i915_guc_client_info(m, dev_priv, guc->execbuf_client); 2349 seq_printf(m, "\nGuC preempt client @ %p:\n", guc->preempt_client); 2350 i915_guc_client_info(m, dev_priv, guc->preempt_client); 2351 2352 i915_guc_log_info(m, dev_priv); 2353 2354 /* Add more as required ... */ 2355 2356 return 0; 2357 } 2358 2359 static int i915_guc_stage_pool(struct seq_file *m, void *data) 2360 { 2361 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2362 const struct intel_guc *guc = &dev_priv->guc; 2363 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr; 2364 struct intel_guc_client *client = guc->execbuf_client; 2365 unsigned int tmp; 2366 int index; 2367 2368 if (!USES_GUC_SUBMISSION(dev_priv)) 2369 return -ENODEV; 2370 2371 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) { 2372 struct intel_engine_cs *engine; 2373 2374 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE)) 2375 continue; 2376 2377 seq_printf(m, "GuC stage descriptor %u:\n", index); 2378 seq_printf(m, "\tIndex: %u\n", desc->stage_id); 2379 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute); 2380 seq_printf(m, "\tPriority: %d\n", desc->priority); 2381 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id); 2382 seq_printf(m, "\tEngines used: 0x%x\n", 2383 desc->engines_used); 2384 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n", 2385 desc->db_trigger_phy, 2386 desc->db_trigger_cpu, 2387 desc->db_trigger_uk); 2388 seq_printf(m, "\tProcess descriptor: 0x%x\n", 2389 desc->process_desc); 2390 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n", 2391 desc->wq_addr, desc->wq_size); 2392 seq_putc(m, '\n'); 2393 2394 for_each_engine_masked(engine, dev_priv, client->engines, tmp) { 2395 u32 guc_engine_id = engine->guc_id; 2396 struct guc_execlist_context *lrc = 2397 &desc->lrc[guc_engine_id]; 2398 2399 seq_printf(m, "\t%s LRC:\n", engine->name); 2400 seq_printf(m, "\t\tContext desc: 0x%x\n", 2401 lrc->context_desc); 2402 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id); 2403 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca); 2404 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin); 2405 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end); 2406 seq_putc(m, '\n'); 2407 } 2408 } 2409 2410 return 0; 2411 } 2412 2413 static int i915_guc_log_dump(struct seq_file *m, void *data) 2414 { 2415 struct drm_info_node *node = m->private; 2416 struct drm_i915_private *dev_priv = node_to_i915(node); 2417 bool dump_load_err = !!node->info_ent->data; 2418 struct drm_i915_gem_object *obj = NULL; 2419 u32 *log; 2420 int i = 0; 2421 2422 if (!HAS_GUC(dev_priv)) 2423 return -ENODEV; 2424 2425 if (dump_load_err) 2426 obj = dev_priv->guc.load_err_log; 2427 else if (dev_priv->guc.log.vma) 2428 obj = dev_priv->guc.log.vma->obj; 2429 2430 if (!obj) 2431 return 0; 2432 2433 log = i915_gem_object_pin_map(obj, I915_MAP_WC); 2434 if (IS_ERR(log)) { 2435 DRM_DEBUG("Failed to pin object\n"); 2436 seq_puts(m, "(log data inaccessible)\n"); 2437 return PTR_ERR(log); 2438 } 2439 2440 for (i = 0; i < obj->base.size / sizeof(u32); i += 4) 2441 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n", 2442 *(log + i), *(log + i + 1), 2443 *(log + i + 2), *(log + i + 3)); 2444 2445
seq_putc(m, '\n'); 2446 2447 i915_gem_object_unpin_map(obj); 2448 2449 return 0; 2450 } 2451 2452 static int i915_guc_log_control_get(void *data, u64 *val) 2453 { 2454 struct drm_i915_private *dev_priv = data; 2455 2456 if (!HAS_GUC(dev_priv)) 2457 return -ENODEV; 2458 2459 if (!dev_priv->guc.log.vma) 2460 return -EINVAL; 2461 2462 *val = i915_modparams.guc_log_level; 2463 2464 return 0; 2465 } 2466 2467 static int i915_guc_log_control_set(void *data, u64 val) 2468 { 2469 struct drm_i915_private *dev_priv = data; 2470 2471 if (!HAS_GUC(dev_priv)) 2472 return -ENODEV; 2473 2474 return intel_guc_log_control(&dev_priv->guc, val); 2475 } 2476 2477 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops, 2478 i915_guc_log_control_get, i915_guc_log_control_set, 2479 "%lld\n"); 2480 2481 static const char *psr2_live_status(u32 val) 2482 { 2483 static const char * const live_status[] = { 2484 "IDLE", 2485 "CAPTURE", 2486 "CAPTURE_FS", 2487 "SLEEP", 2488 "BUFON_FW", 2489 "ML_UP", 2490 "SU_STANDBY", 2491 "FAST_SLEEP", 2492 "DEEP_SLEEP", 2493 "BUF_ON", 2494 "TG_ON" 2495 }; 2496 2497 val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT; 2498 if (val < ARRAY_SIZE(live_status)) 2499 return live_status[val]; 2500 2501 return "unknown"; 2502 } 2503 2504 static int i915_edp_psr_status(struct seq_file *m, void *data) 2505 { 2506 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2507 u32 psrperf = 0; 2508 u32 stat[3]; 2509 enum pipe pipe; 2510 bool enabled = false; 2511 bool sink_support; 2512 2513 if (!HAS_PSR(dev_priv)) 2514 return -ENODEV; 2515 2516 sink_support = dev_priv->psr.sink_support; 2517 seq_printf(m, "Sink_Support: %s\n", yesno(sink_support)); 2518 if (!sink_support) 2519 return 0; 2520 2521 intel_runtime_pm_get(dev_priv); 2522 2523 mutex_lock(&dev_priv->psr.lock); 2524 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2525 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2526 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2527 dev_priv->psr.busy_frontbuffer_bits); 2528 seq_printf(m, "Re-enable work scheduled: %s\n", 2529 yesno(work_busy(&dev_priv->psr.work.work))); 2530 2531 if (HAS_DDI(dev_priv)) { 2532 if (dev_priv->psr.psr2_support) 2533 enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; 2534 else 2535 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 2536 } else { 2537 for_each_pipe(dev_priv, pipe) { 2538 enum transcoder cpu_transcoder = 2539 intel_pipe_to_cpu_transcoder(dev_priv, pipe); 2540 enum intel_display_power_domain power_domain; 2541 2542 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 2543 if (!intel_display_power_get_if_enabled(dev_priv, 2544 power_domain)) 2545 continue; 2546 2547 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2548 VLV_EDP_PSR_CURR_STATE_MASK; 2549 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2550 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2551 enabled = true; 2552 2553 intel_display_power_put(dev_priv, power_domain); 2554 } 2555 } 2556 2557 seq_printf(m, "Main link in standby mode: %s\n", 2558 yesno(dev_priv->psr.link_standby)); 2559 2560 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); 2561 2562 if (!HAS_DDI(dev_priv)) 2563 for_each_pipe(dev_priv, pipe) { 2564 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2565 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2566 seq_printf(m, " pipe %c", pipe_name(pipe)); 2567 } 2568 seq_puts(m, "\n"); 2569 2570 /* 2571 * VLV/CHV PSR has no kind of performance counter 2572 * SKL+ Perf counter is reset to 0 every time DC state is entered
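 * hence the performance counter below is read only on HSW/BDW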
2573 */ 2574 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2575 psrperf = I915_READ(EDP_PSR_PERF_CNT) & 2576 EDP_PSR_PERF_CNT_MASK; 2577 2578 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2579 } 2580 if (dev_priv->psr.psr2_support) { 2581 u32 psr2 = I915_READ(EDP_PSR2_STATUS); 2582 2583 seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n", 2584 psr2, psr2_live_status(psr2)); 2585 } 2586 mutex_unlock(&dev_priv->psr.lock); 2587 2588 intel_runtime_pm_put(dev_priv); 2589 return 0; 2590 } 2591 2592 static int i915_sink_crc(struct seq_file *m, void *data) 2593 { 2594 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2595 struct drm_device *dev = &dev_priv->drm; 2596 struct intel_connector *connector; 2597 struct drm_connector_list_iter conn_iter; 2598 struct intel_dp *intel_dp = NULL; 2599 struct drm_modeset_acquire_ctx ctx; 2600 int ret; 2601 u8 crc[6]; 2602 2603 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 2604 2605 drm_connector_list_iter_begin(dev, &conn_iter); 2606 2607 for_each_intel_connector_iter(connector, &conn_iter) { 2608 struct drm_crtc *crtc; 2609 struct drm_connector_state *state; 2610 struct intel_crtc_state *crtc_state; 2611 2612 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) 2613 continue; 2614 2615 retry: 2616 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); 2617 if (ret) 2618 goto err; 2619 2620 state = connector->base.state; 2621 if (!state->best_encoder) 2622 continue; 2623 2624 crtc = state->crtc; 2625 ret = drm_modeset_lock(&crtc->mutex, &ctx); 2626 if (ret) 2627 goto err; 2628 2629 crtc_state = to_intel_crtc_state(crtc->state); 2630 if (!crtc_state->base.active) 2631 continue; 2632 2633 /* 2634 * Wait for all pending crtc updates to complete, so that any 2635 * in-flight modesets and plane updates have finished before we read the sink CRC.
2636 */ 2637 if (crtc_state->base.commit) { 2638 ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done); 2639 2640 if (ret) 2641 goto err; 2642 } 2643 2644 intel_dp = enc_to_intel_dp(state->best_encoder); 2645 2646 ret = intel_dp_sink_crc(intel_dp, crtc_state, crc); 2647 if (ret) 2648 goto err; 2649 2650 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2651 crc[0], crc[1], crc[2], 2652 crc[3], crc[4], crc[5]); 2653 goto out; 2654 2655 err: 2656 if (ret == -EDEADLK) { 2657 ret = drm_modeset_backoff(&ctx); 2658 if (!ret) 2659 goto retry; 2660 } 2661 goto out; 2662 } 2663 ret = -ENODEV; 2664 out: 2665 drm_connector_list_iter_end(&conn_iter); 2666 drm_modeset_drop_locks(&ctx); 2667 drm_modeset_acquire_fini(&ctx); 2668 2669 return ret; 2670 } 2671 2672 static int i915_energy_uJ(struct seq_file *m, void *data) 2673 { 2674 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2675 unsigned long long power; 2676 u32 units; 2677 2678 if (INTEL_GEN(dev_priv) < 6) 2679 return -ENODEV; 2680 2681 intel_runtime_pm_get(dev_priv); 2682 2683 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) { 2684 intel_runtime_pm_put(dev_priv); 2685 return -ENODEV; 2686 } 2687 2688 units = (power & 0x1f00) >> 8; 2689 power = I915_READ(MCH_SECP_NRG_STTS); 2690 power = (1000000 * power) >> units; /* convert to uJ */ 2691 2692 intel_runtime_pm_put(dev_priv); 2693 2694 seq_printf(m, "%llu", power); 2695 2696 return 0; 2697 } 2698 2699 static int i915_runtime_pm_status(struct seq_file *m, void *unused) 2700 { 2701 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2702 struct pci_dev *pdev = dev_priv->drm.pdev; 2703 2704 if (!HAS_RUNTIME_PM(dev_priv)) 2705 seq_puts(m, "Runtime power management not supported\n"); 2706 2707 seq_printf(m, "GPU idle: %s (epoch %u)\n", 2708 yesno(!dev_priv->gt.awake), dev_priv->gt.epoch); 2709 seq_printf(m, "IRQs disabled: %s\n", 2710 yesno(!intel_irqs_enabled(dev_priv))); 2711 #ifdef CONFIG_PM 2712 seq_printf(m, "Usage count: %d\n", 2713 atomic_read(&dev_priv->drm.dev->power.usage_count)); 2714 #else 2715 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); 2716 #endif 2717 seq_printf(m, "PCI device power state: %s [%d]\n", 2718 pci_power_name(pdev->current_state), 2719 pdev->current_state); 2720 2721 return 0; 2722 } 2723 2724 static int i915_power_domain_info(struct seq_file *m, void *unused) 2725 { 2726 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2727 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2728 int i; 2729 2730 mutex_lock(&power_domains->lock); 2731 2732 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2733 for (i = 0; i < power_domains->power_well_count; i++) { 2734 struct i915_power_well *power_well; 2735 enum intel_display_power_domain power_domain; 2736 2737 power_well = &power_domains->power_wells[i]; 2738 seq_printf(m, "%-25s %d\n", power_well->name, 2739 power_well->count); 2740 2741 for_each_power_domain(power_domain, power_well->domains) 2742 seq_printf(m, " %-23s %d\n", 2743 intel_display_power_domain_str(power_domain), 2744 power_domains->domain_use_count[power_domain]); 2745 } 2746 2747 mutex_unlock(&power_domains->lock); 2748 2749 return 0; 2750 } 2751 2752 static int i915_dmc_info(struct seq_file *m, void *unused) 2753 { 2754 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2755 struct intel_csr *csr; 2756 2757 if (!HAS_CSR(dev_priv)) 2758 return -ENODEV; 2759 2760 csr = &dev_priv->csr; 2761 2762 intel_runtime_pm_get(dev_priv); 2763 2764 seq_printf(m, "fw loaded: 
%s\n", yesno(csr->dmc_payload != NULL)); 2765 seq_printf(m, "path: %s\n", csr->fw_path); 2766 2767 if (!csr->dmc_payload) 2768 goto out; 2769 2770 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 2771 CSR_VERSION_MINOR(csr->version)); 2772 2773 if (IS_KABYLAKE(dev_priv) || 2774 (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) { 2775 seq_printf(m, "DC3 -> DC5 count: %d\n", 2776 I915_READ(SKL_CSR_DC3_DC5_COUNT)); 2777 seq_printf(m, "DC5 -> DC6 count: %d\n", 2778 I915_READ(SKL_CSR_DC5_DC6_COUNT)); 2779 } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) { 2780 seq_printf(m, "DC3 -> DC5 count: %d\n", 2781 I915_READ(BXT_CSR_DC3_DC5_COUNT)); 2782 } 2783 2784 out: 2785 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); 2786 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); 2787 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); 2788 2789 intel_runtime_pm_put(dev_priv); 2790 2791 return 0; 2792 } 2793 2794 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2795 struct drm_display_mode *mode) 2796 { 2797 int i; 2798 2799 for (i = 0; i < tabs; i++) 2800 seq_putc(m, '\t'); 2801 2802 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2803 mode->base.id, mode->name, 2804 mode->vrefresh, mode->clock, 2805 mode->hdisplay, mode->hsync_start, 2806 mode->hsync_end, mode->htotal, 2807 mode->vdisplay, mode->vsync_start, 2808 mode->vsync_end, mode->vtotal, 2809 mode->type, mode->flags); 2810 } 2811 2812 static void intel_encoder_info(struct seq_file *m, 2813 struct intel_crtc *intel_crtc, 2814 struct intel_encoder *intel_encoder) 2815 { 2816 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2817 struct drm_device *dev = &dev_priv->drm; 2818 struct drm_crtc *crtc = &intel_crtc->base; 2819 struct intel_connector *intel_connector; 2820 struct drm_encoder *encoder; 2821 2822 encoder = &intel_encoder->base; 2823 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2824 encoder->base.id, encoder->name); 2825 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2826 struct drm_connector *connector = &intel_connector->base; 2827 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2828 connector->base.id, 2829 connector->name, 2830 drm_get_connector_status_name(connector->status)); 2831 if (connector->status == connector_status_connected) { 2832 struct drm_display_mode *mode = &crtc->mode; 2833 seq_printf(m, ", mode:\n"); 2834 intel_seq_print_mode(m, 2, mode); 2835 } else { 2836 seq_putc(m, '\n'); 2837 } 2838 } 2839 } 2840 2841 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2842 { 2843 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2844 struct drm_device *dev = &dev_priv->drm; 2845 struct drm_crtc *crtc = &intel_crtc->base; 2846 struct intel_encoder *intel_encoder; 2847 struct drm_plane_state *plane_state = crtc->primary->state; 2848 struct drm_framebuffer *fb = plane_state->fb; 2849 2850 if (fb) 2851 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2852 fb->base.id, plane_state->src_x >> 16, 2853 plane_state->src_y >> 16, fb->width, fb->height); 2854 else 2855 seq_puts(m, "\tprimary plane disabled\n"); 2856 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2857 intel_encoder_info(m, intel_crtc, intel_encoder); 2858 } 2859 2860 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2861 { 2862 struct drm_display_mode *mode = panel->fixed_mode; 2863 
2864 seq_printf(m, "\tfixed mode:\n"); 2865 intel_seq_print_mode(m, 2, mode); 2866 } 2867 2868 static void intel_dp_info(struct seq_file *m, 2869 struct intel_connector *intel_connector) 2870 { 2871 struct intel_encoder *intel_encoder = intel_connector->encoder; 2872 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2873 2874 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2875 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 2876 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) 2877 intel_panel_info(m, &intel_connector->panel); 2878 2879 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, 2880 &intel_dp->aux); 2881 } 2882 2883 static void intel_dp_mst_info(struct seq_file *m, 2884 struct intel_connector *intel_connector) 2885 { 2886 struct intel_encoder *intel_encoder = intel_connector->encoder; 2887 struct intel_dp_mst_encoder *intel_mst = 2888 enc_to_mst(&intel_encoder->base); 2889 struct intel_digital_port *intel_dig_port = intel_mst->primary; 2890 struct intel_dp *intel_dp = &intel_dig_port->dp; 2891 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, 2892 intel_connector->port); 2893 2894 seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); 2895 } 2896 2897 static void intel_hdmi_info(struct seq_file *m, 2898 struct intel_connector *intel_connector) 2899 { 2900 struct intel_encoder *intel_encoder = intel_connector->encoder; 2901 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2902 2903 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); 2904 } 2905 2906 static void intel_lvds_info(struct seq_file *m, 2907 struct intel_connector *intel_connector) 2908 { 2909 intel_panel_info(m, &intel_connector->panel); 2910 } 2911 2912 static void intel_connector_info(struct seq_file *m, 2913 struct drm_connector *connector) 2914 { 2915 struct intel_connector *intel_connector = to_intel_connector(connector); 2916 struct intel_encoder *intel_encoder = intel_connector->encoder; 2917 struct drm_display_mode *mode; 2918 2919 seq_printf(m, "connector %d: type %s, status: %s\n", 2920 connector->base.id, connector->name, 2921 drm_get_connector_status_name(connector->status)); 2922 if (connector->status == connector_status_connected) { 2923 seq_printf(m, "\tname: %s\n", connector->display_info.name); 2924 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2925 connector->display_info.width_mm, 2926 connector->display_info.height_mm); 2927 seq_printf(m, "\tsubpixel order: %s\n", 2928 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2929 seq_printf(m, "\tCEA rev: %d\n", 2930 connector->display_info.cea_rev); 2931 } 2932 2933 if (!intel_encoder) 2934 return; 2935 2936 switch (connector->connector_type) { 2937 case DRM_MODE_CONNECTOR_DisplayPort: 2938 case DRM_MODE_CONNECTOR_eDP: 2939 if (intel_encoder->type == INTEL_OUTPUT_DP_MST) 2940 intel_dp_mst_info(m, intel_connector); 2941 else 2942 intel_dp_info(m, intel_connector); 2943 break; 2944 case DRM_MODE_CONNECTOR_LVDS: 2945 if (intel_encoder->type == INTEL_OUTPUT_LVDS) 2946 intel_lvds_info(m, intel_connector); 2947 break; 2948 case DRM_MODE_CONNECTOR_HDMIA: 2949 if (intel_encoder->type == INTEL_OUTPUT_HDMI || 2950 intel_encoder->type == INTEL_OUTPUT_DDI) 2951 intel_hdmi_info(m, intel_connector); 2952 break; 2953 default: 2954 break; 2955 } 2956 2957 seq_printf(m, "\tmodes:\n"); 2958 list_for_each_entry(mode, &connector->modes, head) 2959 intel_seq_print_mode(m, 2, mode); 2960 } 2961 2962 static 
const char *plane_type(enum drm_plane_type type) 2963 { 2964 switch (type) { 2965 case DRM_PLANE_TYPE_OVERLAY: 2966 return "OVL"; 2967 case DRM_PLANE_TYPE_PRIMARY: 2968 return "PRI"; 2969 case DRM_PLANE_TYPE_CURSOR: 2970 return "CUR"; 2971 /* 2972 * Deliberately omitting default: to generate compiler warnings 2973 * when a new drm_plane_type gets added. 2974 */ 2975 } 2976 2977 return "unknown"; 2978 } 2979 2980 static const char *plane_rotation(unsigned int rotation) 2981 { 2982 static char buf[48]; 2983 /* 2984 * According to the documentation only one DRM_MODE_ROTATE_ is allowed, 2985 * but print them all to make any misuse of the values visible 2986 */ 2987 snprintf(buf, sizeof(buf), 2988 "%s%s%s%s%s%s(0x%08x)", 2989 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "", 2990 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "", 2991 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "", 2992 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "", 2993 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "", 2994 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "", 2995 rotation); 2996 2997 return buf; 2998 } 2999 3000 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3001 { 3002 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3003 struct drm_device *dev = &dev_priv->drm; 3004 struct intel_plane *intel_plane; 3005 3006 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3007 struct drm_plane_state *state; 3008 struct drm_plane *plane = &intel_plane->base; 3009 struct drm_format_name_buf format_name; 3010 3011 if (!plane->state) { 3012 seq_puts(m, "plane->state is NULL!\n"); 3013 continue; 3014 } 3015 3016 state = plane->state; 3017 3018 if (state->fb) { 3019 drm_get_format_name(state->fb->format->format, 3020 &format_name); 3021 } else { 3022 sprintf(format_name.str, "N/A"); 3023 } 3024 3025 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", 3026 plane->base.id, 3027 plane_type(intel_plane->base.type), 3028 state->crtc_x, state->crtc_y, 3029 state->crtc_w, state->crtc_h, 3030 (state->src_x >> 16), 3031 ((state->src_x & 0xffff) * 15625) >> 10, 3032 (state->src_y >> 16), 3033 ((state->src_y & 0xffff) * 15625) >> 10, 3034 (state->src_w >> 16), 3035 ((state->src_w & 0xffff) * 15625) >> 10, 3036 (state->src_h >> 16), 3037 ((state->src_h & 0xffff) * 15625) >> 10, 3038 format_name.str, 3039 plane_rotation(state->rotation)); 3040 } 3041 } 3042 3043 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3044 { 3045 struct intel_crtc_state *pipe_config; 3046 int num_scalers = intel_crtc->num_scalers; 3047 int i; 3048 3049 pipe_config = to_intel_crtc_state(intel_crtc->base.state); 3050 3051 /* Not all platforms have a scaler */ 3052 if (num_scalers) { 3053 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 3054 num_scalers, 3055 pipe_config->scaler_state.scaler_users, 3056 pipe_config->scaler_state.scaler_id); 3057 3058 for (i = 0; i < num_scalers; i++) { 3059 struct intel_scaler *sc = 3060 &pipe_config->scaler_state.scalers[i]; 3061 3062 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 3063 i, yesno(sc->in_use), sc->mode); 3064 } 3065 seq_puts(m, "\n"); 3066 } else { 3067 seq_puts(m, "\tNo scalers available on this platform\n"); 3068 } 3069 } 3070 3071 static int i915_display_info(struct seq_file *m, void *unused) 3072 { 3073 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3074 struct drm_device *dev = &dev_priv->drm; 3075 struct intel_crtc
*crtc; 3076 struct drm_connector *connector; 3077 struct drm_connector_list_iter conn_iter; 3078 3079 intel_runtime_pm_get(dev_priv); 3080 seq_printf(m, "CRTC info\n"); 3081 seq_printf(m, "---------\n"); 3082 for_each_intel_crtc(dev, crtc) { 3083 struct intel_crtc_state *pipe_config; 3084 3085 drm_modeset_lock(&crtc->base.mutex, NULL); 3086 pipe_config = to_intel_crtc_state(crtc->base.state); 3087 3088 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n", 3089 crtc->base.base.id, pipe_name(crtc->pipe), 3090 yesno(pipe_config->base.active), 3091 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 3092 yesno(pipe_config->dither), pipe_config->pipe_bpp); 3093 3094 if (pipe_config->base.active) { 3095 struct intel_plane *cursor = 3096 to_intel_plane(crtc->base.cursor); 3097 3098 intel_crtc_info(m, crtc); 3099 3100 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n", 3101 yesno(cursor->base.state->visible), 3102 cursor->base.state->crtc_x, 3103 cursor->base.state->crtc_y, 3104 cursor->base.state->crtc_w, 3105 cursor->base.state->crtc_h, 3106 cursor->cursor.base); 3107 intel_scaler_info(m, crtc); 3108 intel_plane_info(m, crtc); 3109 } 3110 3111 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n", 3112 yesno(!crtc->cpu_fifo_underrun_disabled), 3113 yesno(!crtc->pch_fifo_underrun_disabled)); 3114 drm_modeset_unlock(&crtc->base.mutex); 3115 } 3116 3117 seq_printf(m, "\n"); 3118 seq_printf(m, "Connector info\n"); 3119 seq_printf(m, "--------------\n"); 3120 mutex_lock(&dev->mode_config.mutex); 3121 drm_connector_list_iter_begin(dev, &conn_iter); 3122 drm_for_each_connector_iter(connector, &conn_iter) 3123 intel_connector_info(m, connector); 3124 drm_connector_list_iter_end(&conn_iter); 3125 mutex_unlock(&dev->mode_config.mutex); 3126 3127 intel_runtime_pm_put(dev_priv); 3128 3129 return 0; 3130 } 3131 3132 static int i915_engine_info(struct seq_file *m, void *unused) 3133 { 3134 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3135 struct intel_engine_cs *engine; 3136 enum intel_engine_id id; 3137 struct drm_printer p; 3138 3139 intel_runtime_pm_get(dev_priv); 3140 3141 seq_printf(m, "GT awake?
%s (epoch %u)\n", 3142 yesno(dev_priv->gt.awake), dev_priv->gt.epoch); 3143 seq_printf(m, "Global active requests: %d\n", 3144 dev_priv->gt.active_requests); 3145 seq_printf(m, "CS timestamp frequency: %u kHz\n", 3146 dev_priv->info.cs_timestamp_frequency_khz); 3147 3148 p = drm_seq_file_printer(m); 3149 for_each_engine(engine, dev_priv, id) 3150 intel_engine_dump(engine, &p, "%s\n", engine->name); 3151 3152 intel_runtime_pm_put(dev_priv); 3153 3154 return 0; 3155 } 3156 3157 static int i915_shrinker_info(struct seq_file *m, void *unused) 3158 { 3159 struct drm_i915_private *i915 = node_to_i915(m->private); 3160 3161 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks); 3162 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch); 3163 3164 return 0; 3165 } 3166 3167 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 3168 { 3169 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3170 struct drm_device *dev = &dev_priv->drm; 3171 int i; 3172 3173 drm_modeset_lock_all(dev); 3174 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3175 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 3176 3177 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 3178 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", 3179 pll->state.crtc_mask, pll->active_mask, yesno(pll->on)); 3180 seq_printf(m, " tracked hardware state:\n"); 3181 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); 3182 seq_printf(m, " dpll_md: 0x%08x\n", 3183 pll->state.hw_state.dpll_md); 3184 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); 3185 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); 3186 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); 3187 } 3188 drm_modeset_unlock_all(dev); 3189 3190 return 0; 3191 } 3192 3193 static int i915_wa_registers(struct seq_file *m, void *unused) 3194 { 3195 int i; 3196 int ret; 3197 struct intel_engine_cs *engine; 3198 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3199 struct drm_device *dev = &dev_priv->drm; 3200 struct i915_workarounds *workarounds = &dev_priv->workarounds; 3201 enum intel_engine_id id; 3202 3203 ret = mutex_lock_interruptible(&dev->struct_mutex); 3204 if (ret) 3205 return ret; 3206 3207 intel_runtime_pm_get(dev_priv); 3208 3209 seq_printf(m, "Workarounds applied: %d\n", workarounds->count); 3210 for_each_engine(engine, dev_priv, id) 3211 seq_printf(m, "HW whitelist count for %s: %d\n", 3212 engine->name, workarounds->hw_whitelist_count[id]); 3213 for (i = 0; i < workarounds->count; ++i) { 3214 i915_reg_t addr; 3215 u32 mask, value, read; 3216 bool ok; 3217 3218 addr = workarounds->reg[i].addr; 3219 mask = workarounds->reg[i].mask; 3220 value = workarounds->reg[i].value; 3221 read = I915_READ(addr); 3222 ok = (value & mask) == (read & mask); 3223 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 3224 i915_mmio_reg_offset(addr), value, mask, read, ok ? 
"OK" : "FAIL"); 3225 } 3226 3227 intel_runtime_pm_put(dev_priv); 3228 mutex_unlock(&dev->struct_mutex); 3229 3230 return 0; 3231 } 3232 3233 static int i915_ipc_status_show(struct seq_file *m, void *data) 3234 { 3235 struct drm_i915_private *dev_priv = m->private; 3236 3237 seq_printf(m, "Isochronous Priority Control: %s\n", 3238 yesno(dev_priv->ipc_enabled)); 3239 return 0; 3240 } 3241 3242 static int i915_ipc_status_open(struct inode *inode, struct file *file) 3243 { 3244 struct drm_i915_private *dev_priv = inode->i_private; 3245 3246 if (!HAS_IPC(dev_priv)) 3247 return -ENODEV; 3248 3249 return single_open(file, i915_ipc_status_show, dev_priv); 3250 } 3251 3252 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, 3253 size_t len, loff_t *offp) 3254 { 3255 struct seq_file *m = file->private_data; 3256 struct drm_i915_private *dev_priv = m->private; 3257 int ret; 3258 bool enable; 3259 3260 ret = kstrtobool_from_user(ubuf, len, &enable); 3261 if (ret < 0) 3262 return ret; 3263 3264 intel_runtime_pm_get(dev_priv); 3265 if (!dev_priv->ipc_enabled && enable) 3266 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); 3267 dev_priv->wm.distrust_bios_wm = true; 3268 dev_priv->ipc_enabled = enable; 3269 intel_enable_ipc(dev_priv); 3270 intel_runtime_pm_put(dev_priv); 3271 3272 return len; 3273 } 3274 3275 static const struct file_operations i915_ipc_status_fops = { 3276 .owner = THIS_MODULE, 3277 .open = i915_ipc_status_open, 3278 .read = seq_read, 3279 .llseek = seq_lseek, 3280 .release = single_release, 3281 .write = i915_ipc_status_write 3282 }; 3283 3284 static int i915_ddb_info(struct seq_file *m, void *unused) 3285 { 3286 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3287 struct drm_device *dev = &dev_priv->drm; 3288 struct skl_ddb_allocation *ddb; 3289 struct skl_ddb_entry *entry; 3290 enum pipe pipe; 3291 int plane; 3292 3293 if (INTEL_GEN(dev_priv) < 9) 3294 return -ENODEV; 3295 3296 drm_modeset_lock_all(dev); 3297 3298 ddb = &dev_priv->wm.skl_hw.ddb; 3299 3300 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 3301 3302 for_each_pipe(dev_priv, pipe) { 3303 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 3304 3305 for_each_universal_plane(dev_priv, pipe, plane) { 3306 entry = &ddb->plane[pipe][plane]; 3307 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, 3308 entry->start, entry->end, 3309 skl_ddb_entry_size(entry)); 3310 } 3311 3312 entry = &ddb->plane[pipe][PLANE_CURSOR]; 3313 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3314 entry->end, skl_ddb_entry_size(entry)); 3315 } 3316 3317 drm_modeset_unlock_all(dev); 3318 3319 return 0; 3320 } 3321 3322 static void drrs_status_per_crtc(struct seq_file *m, 3323 struct drm_device *dev, 3324 struct intel_crtc *intel_crtc) 3325 { 3326 struct drm_i915_private *dev_priv = to_i915(dev); 3327 struct i915_drrs *drrs = &dev_priv->drrs; 3328 int vrefresh = 0; 3329 struct drm_connector *connector; 3330 struct drm_connector_list_iter conn_iter; 3331 3332 drm_connector_list_iter_begin(dev, &conn_iter); 3333 drm_for_each_connector_iter(connector, &conn_iter) { 3334 if (connector->state->crtc != &intel_crtc->base) 3335 continue; 3336 3337 seq_printf(m, "%s:\n", connector->name); 3338 } 3339 drm_connector_list_iter_end(&conn_iter); 3340 3341 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) 3342 seq_puts(m, "\tVBT: DRRS_type: Static"); 3343 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) 3344 seq_puts(m, "\tVBT: DRRS_type: Seamless"); 3345 else if 
(dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED) 3346 seq_puts(m, "\tVBT: DRRS_type: None"); 3347 else 3348 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); 3349 3350 seq_puts(m, "\n\n"); 3351 3352 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { 3353 struct intel_panel *panel; 3354 3355 mutex_lock(&drrs->mutex); 3356 /* DRRS Supported */ 3357 seq_puts(m, "\tDRRS Supported: Yes\n"); 3358 3359 /* disable_drrs() will make drrs->dp NULL */ 3360 if (!drrs->dp) { 3361 seq_puts(m, "Idleness DRRS: Disabled\n"); 3362 if (dev_priv->psr.enabled) 3363 seq_puts(m, 3364 "\tAs PSR is enabled, DRRS is not enabled\n"); 3365 mutex_unlock(&drrs->mutex); 3366 return; 3367 } 3368 3369 panel = &drrs->dp->attached_connector->panel; 3370 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", 3371 drrs->busy_frontbuffer_bits); 3372 3373 seq_puts(m, "\n\t\t"); 3374 if (drrs->refresh_rate_type == DRRS_HIGH_RR) { 3375 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); 3376 vrefresh = panel->fixed_mode->vrefresh; 3377 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { 3378 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); 3379 vrefresh = panel->downclock_mode->vrefresh; 3380 } else { 3381 seq_printf(m, "DRRS_State: Unknown(%d)\n", 3382 drrs->refresh_rate_type); 3383 mutex_unlock(&drrs->mutex); 3384 return; 3385 } 3386 seq_printf(m, "\t\tVrefresh: %d", vrefresh); 3387 3388 seq_puts(m, "\n\t\t"); 3389 mutex_unlock(&drrs->mutex); 3390 } else { 3391 /* DRRS not supported. Print the VBT parameter */ 3392 seq_puts(m, "\tDRRS Supported: No"); 3393 } 3394 seq_puts(m, "\n"); 3395 } 3396 3397 static int i915_drrs_status(struct seq_file *m, void *unused) 3398 { 3399 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3400 struct drm_device *dev = &dev_priv->drm; 3401 struct intel_crtc *intel_crtc; 3402 int active_crtc_cnt = 0; 3403 3404 drm_modeset_lock_all(dev); 3405 for_each_intel_crtc(dev, intel_crtc) { 3406 if (intel_crtc->base.state->active) { 3407 active_crtc_cnt++; 3408 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); 3409 3410 drrs_status_per_crtc(m, dev, intel_crtc); 3411 } 3412 } 3413 drm_modeset_unlock_all(dev); 3414 3415 if (!active_crtc_cnt) 3416 seq_puts(m, "No active crtc found\n"); 3417 3418 return 0; 3419 } 3420 3421 static int i915_dp_mst_info(struct seq_file *m, void *unused) 3422 { 3423 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3424 struct drm_device *dev = &dev_priv->drm; 3425 struct intel_encoder *intel_encoder; 3426 struct intel_digital_port *intel_dig_port; 3427 struct drm_connector *connector; 3428 struct drm_connector_list_iter conn_iter; 3429 3430 drm_connector_list_iter_begin(dev, &conn_iter); 3431 drm_for_each_connector_iter(connector, &conn_iter) { 3432 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 3433 continue; 3434 3435 intel_encoder = intel_attached_encoder(connector); 3436 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) 3437 continue; 3438 3439 intel_dig_port = enc_to_dig_port(&intel_encoder->base); 3440 if (!intel_dig_port->dp.can_mst) 3441 continue; 3442 3443 seq_printf(m, "MST Source Port %c\n", 3444 port_name(intel_dig_port->base.port)); 3445 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 3446 } 3447 drm_connector_list_iter_end(&conn_iter); 3448 3449 return 0; 3450 } 3451 3452 static ssize_t i915_displayport_test_active_write(struct file *file, 3453 const char __user *ubuf, 3454 size_t len, loff_t *offp) 3455 { 3456 char *input_buffer; 3457 int status = 0; 3458 struct drm_device *dev; 3459 struct drm_connector
*connector; 3460 struct drm_connector_list_iter conn_iter; 3461 struct intel_dp *intel_dp; 3462 int val = 0; 3463 3464 dev = ((struct seq_file *)file->private_data)->private; 3465 3466 if (len == 0) 3467 return 0; 3468 3469 input_buffer = memdup_user_nul(ubuf, len); 3470 if (IS_ERR(input_buffer)) 3471 return PTR_ERR(input_buffer); 3472 3473 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); 3474 3475 drm_connector_list_iter_begin(dev, &conn_iter); 3476 drm_for_each_connector_iter(connector, &conn_iter) { 3477 struct intel_encoder *encoder; 3478 3479 if (connector->connector_type != 3480 DRM_MODE_CONNECTOR_DisplayPort) 3481 continue; 3482 3483 encoder = to_intel_encoder(connector->encoder); 3484 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3485 continue; 3486 3487 if (encoder && connector->status == connector_status_connected) { 3488 intel_dp = enc_to_intel_dp(&encoder->base); 3489 status = kstrtoint(input_buffer, 10, &val); 3490 if (status < 0) 3491 break; 3492 DRM_DEBUG_DRIVER("Got %d for test active\n", val); 3493 /* To prevent erroneous activation of the compliance 3494 * testing code, only accept an actual value of 1 here 3495 */ 3496 if (val == 1) 3497 intel_dp->compliance.test_active = 1; 3498 else 3499 intel_dp->compliance.test_active = 0; 3500 } 3501 } 3502 drm_connector_list_iter_end(&conn_iter); 3503 kfree(input_buffer); 3504 if (status < 0) 3505 return status; 3506 3507 *offp += len; 3508 return len; 3509 } 3510 3511 static int i915_displayport_test_active_show(struct seq_file *m, void *data) 3512 { 3513 struct drm_device *dev = m->private; 3514 struct drm_connector *connector; 3515 struct drm_connector_list_iter conn_iter; 3516 struct intel_dp *intel_dp; 3517 3518 drm_connector_list_iter_begin(dev, &conn_iter); 3519 drm_for_each_connector_iter(connector, &conn_iter) { 3520 struct intel_encoder *encoder; 3521 3522 if (connector->connector_type != 3523 DRM_MODE_CONNECTOR_DisplayPort) 3524 continue; 3525 3526 encoder = to_intel_encoder(connector->encoder); 3527 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3528 continue; 3529 3530 if (encoder && connector->status == connector_status_connected) { 3531 intel_dp = enc_to_intel_dp(&encoder->base); 3532 if (intel_dp->compliance.test_active) 3533 seq_puts(m, "1"); 3534 else 3535 seq_puts(m, "0"); 3536 } else 3537 seq_puts(m, "0"); 3538 } 3539 drm_connector_list_iter_end(&conn_iter); 3540 3541 return 0; 3542 } 3543 3544 static int i915_displayport_test_active_open(struct inode *inode, 3545 struct file *file) 3546 { 3547 struct drm_i915_private *dev_priv = inode->i_private; 3548 3549 return single_open(file, i915_displayport_test_active_show, 3550 &dev_priv->drm); 3551 } 3552 3553 static const struct file_operations i915_displayport_test_active_fops = { 3554 .owner = THIS_MODULE, 3555 .open = i915_displayport_test_active_open, 3556 .read = seq_read, 3557 .llseek = seq_lseek, 3558 .release = single_release, 3559 .write = i915_displayport_test_active_write 3560 }; 3561 3562 static int i915_displayport_test_data_show(struct seq_file *m, void *data) 3563 { 3564 struct drm_device *dev = m->private; 3565 struct drm_connector *connector; 3566 struct drm_connector_list_iter conn_iter; 3567 struct intel_dp *intel_dp; 3568 3569 drm_connector_list_iter_begin(dev, &conn_iter); 3570 drm_for_each_connector_iter(connector, &conn_iter) { 3571 struct intel_encoder *encoder; 3572 3573 if (connector->connector_type != 3574 DRM_MODE_CONNECTOR_DisplayPort) 3575 continue; 3576 3577 encoder = to_intel_encoder(connector->encoder); 
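/* Compliance testing operates on SST DP connectors; skip MST encoders. */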
3578 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3579 continue; 3580 3581 if (encoder && connector->status == connector_status_connected) { 3582 intel_dp = enc_to_intel_dp(&encoder->base); 3583 if (intel_dp->compliance.test_type == 3584 DP_TEST_LINK_EDID_READ) 3585 seq_printf(m, "%lx", 3586 intel_dp->compliance.test_data.edid); 3587 else if (intel_dp->compliance.test_type == 3588 DP_TEST_LINK_VIDEO_PATTERN) { 3589 seq_printf(m, "hdisplay: %d\n", 3590 intel_dp->compliance.test_data.hdisplay); 3591 seq_printf(m, "vdisplay: %d\n", 3592 intel_dp->compliance.test_data.vdisplay); 3593 seq_printf(m, "bpc: %u\n", 3594 intel_dp->compliance.test_data.bpc); 3595 } 3596 } else 3597 seq_puts(m, "0"); 3598 } 3599 drm_connector_list_iter_end(&conn_iter); 3600 3601 return 0; 3602 } 3603 static int i915_displayport_test_data_open(struct inode *inode, 3604 struct file *file) 3605 { 3606 struct drm_i915_private *dev_priv = inode->i_private; 3607 3608 return single_open(file, i915_displayport_test_data_show, 3609 &dev_priv->drm); 3610 } 3611 3612 static const struct file_operations i915_displayport_test_data_fops = { 3613 .owner = THIS_MODULE, 3614 .open = i915_displayport_test_data_open, 3615 .read = seq_read, 3616 .llseek = seq_lseek, 3617 .release = single_release 3618 }; 3619 3620 static int i915_displayport_test_type_show(struct seq_file *m, void *data) 3621 { 3622 struct drm_device *dev = m->private; 3623 struct drm_connector *connector; 3624 struct drm_connector_list_iter conn_iter; 3625 struct intel_dp *intel_dp; 3626 3627 drm_connector_list_iter_begin(dev, &conn_iter); 3628 drm_for_each_connector_iter(connector, &conn_iter) { 3629 struct intel_encoder *encoder; 3630 3631 if (connector->connector_type != 3632 DRM_MODE_CONNECTOR_DisplayPort) 3633 continue; 3634 3635 encoder = to_intel_encoder(connector->encoder); 3636 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3637 continue; 3638 3639 if (encoder && connector->status == connector_status_connected) { 3640 intel_dp = enc_to_intel_dp(&encoder->base); 3641 seq_printf(m, "%02lx", intel_dp->compliance.test_type); 3642 } else 3643 seq_puts(m, "0"); 3644 } 3645 drm_connector_list_iter_end(&conn_iter); 3646 3647 return 0; 3648 } 3649 3650 static int i915_displayport_test_type_open(struct inode *inode, 3651 struct file *file) 3652 { 3653 struct drm_i915_private *dev_priv = inode->i_private; 3654 3655 return single_open(file, i915_displayport_test_type_show, 3656 &dev_priv->drm); 3657 } 3658 3659 static const struct file_operations i915_displayport_test_type_fops = { 3660 .owner = THIS_MODULE, 3661 .open = i915_displayport_test_type_open, 3662 .read = seq_read, 3663 .llseek = seq_lseek, 3664 .release = single_release 3665 }; 3666 3667 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) 3668 { 3669 struct drm_i915_private *dev_priv = m->private; 3670 struct drm_device *dev = &dev_priv->drm; 3671 int level; 3672 int num_levels; 3673 3674 if (IS_CHERRYVIEW(dev_priv)) 3675 num_levels = 3; 3676 else if (IS_VALLEYVIEW(dev_priv)) 3677 num_levels = 1; 3678 else if (IS_G4X(dev_priv)) 3679 num_levels = 3; 3680 else 3681 num_levels = ilk_wm_max_level(dev_priv) + 1; 3682 3683 drm_modeset_lock_all(dev); 3684 3685 for (level = 0; level < num_levels; level++) { 3686 unsigned int latency = wm[level]; 3687 3688 /* 3689 * - WM1+ latency values in 0.5us units 3690 * - latencies are in us on gen9/vlv/chv 3691 */ 3692 if (INTEL_GEN(dev_priv) >= 9 || 3693 IS_VALLEYVIEW(dev_priv) || 3694 IS_CHERRYVIEW(dev_priv) || 3695 IS_G4X(dev_priv)) 3696 latency 
*= 10; 3697 else if (level > 0) 3698 latency *= 5; 3699 3700 seq_printf(m, "WM%d %u (%u.%u usec)\n", 3701 level, wm[level], latency / 10, latency % 10); 3702 } 3703 3704 drm_modeset_unlock_all(dev); 3705 } 3706 3707 static int pri_wm_latency_show(struct seq_file *m, void *data) 3708 { 3709 struct drm_i915_private *dev_priv = m->private; 3710 const uint16_t *latencies; 3711 3712 if (INTEL_GEN(dev_priv) >= 9) 3713 latencies = dev_priv->wm.skl_latency; 3714 else 3715 latencies = dev_priv->wm.pri_latency; 3716 3717 wm_latency_show(m, latencies); 3718 3719 return 0; 3720 } 3721 3722 static int spr_wm_latency_show(struct seq_file *m, void *data) 3723 { 3724 struct drm_i915_private *dev_priv = m->private; 3725 const uint16_t *latencies; 3726 3727 if (INTEL_GEN(dev_priv) >= 9) 3728 latencies = dev_priv->wm.skl_latency; 3729 else 3730 latencies = dev_priv->wm.spr_latency; 3731 3732 wm_latency_show(m, latencies); 3733 3734 return 0; 3735 } 3736 3737 static int cur_wm_latency_show(struct seq_file *m, void *data) 3738 { 3739 struct drm_i915_private *dev_priv = m->private; 3740 const uint16_t *latencies; 3741 3742 if (INTEL_GEN(dev_priv) >= 9) 3743 latencies = dev_priv->wm.skl_latency; 3744 else 3745 latencies = dev_priv->wm.cur_latency; 3746 3747 wm_latency_show(m, latencies); 3748 3749 return 0; 3750 } 3751 3752 static int pri_wm_latency_open(struct inode *inode, struct file *file) 3753 { 3754 struct drm_i915_private *dev_priv = inode->i_private; 3755 3756 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 3757 return -ENODEV; 3758 3759 return single_open(file, pri_wm_latency_show, dev_priv); 3760 } 3761 3762 static int spr_wm_latency_open(struct inode *inode, struct file *file) 3763 { 3764 struct drm_i915_private *dev_priv = inode->i_private; 3765 3766 if (HAS_GMCH_DISPLAY(dev_priv)) 3767 return -ENODEV; 3768 3769 return single_open(file, spr_wm_latency_show, dev_priv); 3770 } 3771 3772 static int cur_wm_latency_open(struct inode *inode, struct file *file) 3773 { 3774 struct drm_i915_private *dev_priv = inode->i_private; 3775 3776 if (HAS_GMCH_DISPLAY(dev_priv)) 3777 return -ENODEV; 3778 3779 return single_open(file, cur_wm_latency_show, dev_priv); 3780 } 3781 3782 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 3783 size_t len, loff_t *offp, uint16_t wm[8]) 3784 { 3785 struct seq_file *m = file->private_data; 3786 struct drm_i915_private *dev_priv = m->private; 3787 struct drm_device *dev = &dev_priv->drm; 3788 uint16_t new[8] = { 0 }; 3789 int num_levels; 3790 int level; 3791 int ret; 3792 char tmp[32]; 3793 3794 if (IS_CHERRYVIEW(dev_priv)) 3795 num_levels = 3; 3796 else if (IS_VALLEYVIEW(dev_priv)) 3797 num_levels = 1; 3798 else if (IS_G4X(dev_priv)) 3799 num_levels = 3; 3800 else 3801 num_levels = ilk_wm_max_level(dev_priv) + 1; 3802 3803 if (len >= sizeof(tmp)) 3804 return -EINVAL; 3805 3806 if (copy_from_user(tmp, ubuf, len)) 3807 return -EFAULT; 3808 3809 tmp[len] = '\0'; 3810 3811 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 3812 &new[0], &new[1], &new[2], &new[3], 3813 &new[4], &new[5], &new[6], &new[7]); 3814 if (ret != num_levels) 3815 return -EINVAL; 3816 3817 drm_modeset_lock_all(dev); 3818 3819 for (level = 0; level < num_levels; level++) 3820 wm[level] = new[level]; 3821 3822 drm_modeset_unlock_all(dev); 3823 3824 return len; 3825 } 3826 3827 3828 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 3829 size_t len, loff_t *offp) 3830 { 3831 struct seq_file *m = file->private_data; 3832 struct drm_i915_private *dev_priv = 
static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
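
/*
 * Usage sketch for the i915_wedged attribute defined next (path and value
 * are illustrative): reading reports whether the GPU is terminally wedged,
 * while writing an engine mask, e.g.
 *
 *   echo -1 > /sys/kernel/debug/dri/0/i915_wedged
 *
 * marks the masked engines as stalled and injects a hang through
 * i915_handle_error(), blocking until the reset handoff completes.
 */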
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling the same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'.
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, "Manually setting wedged to %llu", val);

	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");

#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE)
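
/*
 * Example bit combinations for i915_gem_drop_caches (illustrative values;
 * reading the file reports DROP_ALL, i.e. 0x7f, as the supported mask):
 *
 *   echo 0x7f > /sys/kernel/debug/dri/0/i915_gem_drop_caches  # drop everything
 *   echo 0x4  > /sys/kernel/debug/dri/0/i915_gem_drop_caches  # DROP_RETIRE only
 *
 * Bits outside DROP_ALL are accepted but ignored by the setter below.
 */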
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/*
	 * No need to check and wait for gpu resets; only libdrm
	 * auto-restarts its ioctls on -EAGAIN.
	 */
	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(dev_priv,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED);

		if (val & DROP_RETIRE)
			i915_gem_retire_requests(dev_priv);

		mutex_unlock(&dev->struct_mutex);
	}

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(dev_priv);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE)
		drain_delayed_work(&dev_priv->gt.idle_work);

	if (val & DROP_FREED) {
		synchronize_rcu();
		i915_gem_drain_freed_objects(dev_priv);
	}

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = rps->max_freq;
	hw_min = rps->min_freq;

	if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
		mutex_unlock(&dev_priv->pcu_lock);
		return -EINVAL;
	}

	rps->max_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->pcu_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
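
/*
 * i915_min_freq below mirrors i915_max_freq above: both take a frequency
 * in MHz, convert it to a hardware opcode via intel_freq_opcode() and
 * adjust the RPS softlimits. An illustrative session with made-up values:
 *
 *   cat /sys/kernel/debug/dri/0/i915_max_freq   # e.g. prints 1150
 *   echo 800 > /sys/kernel/debug/dri/0/i915_max_freq
 *
 * Requests outside [min_freq, max_freq], or ones that would cross the
 * opposite softlimit, are rejected with -EINVAL.
 */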
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = rps->max_freq;
	hw_min = rps->min_freq;

	if (val < hw_min ||
	    val > hw_max || val > rps->max_freq_softlimit) {
		mutex_unlock(&dev_priv->pcu_lock);
		return -EINVAL;
	}

	rps->min_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->pcu_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
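
/*
 * Usage note for i915_cache_sharing above (illustrative path): the value
 * is the GEN6_MBCUNIT_SNPCR snoop policy field, so only 0..3 are accepted
 * and anything larger returns -EINVAL; the file is gen6/gen7 only:
 *
 *   echo 3 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */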
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
}

static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	int s_max = 6, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2];

	for (s = 0; s < s_max; s++) {
		/*
		 * FIXME: The valid SS mask respects the spec and reads only
		 * the valid bits of those registers, excluding the reserved
		 * ones, although this seems wrong because it would leave
		 * many subslices without an ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask = info->sseu.subslice_mask;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
}

static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
	int s_max = 3, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_GEN9_LP(dev_priv)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask =
				INTEL_INFO(dev_priv)->sseu.subslice_mask;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask |= BIT(ss);
			}

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
}

static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
		sseu->eu_per_subslice =
			INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
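
/*
 * Terminology note (a summary, not taken from any single spec reference):
 * the per-platform helpers above fill in sseu_dev_info in terms of the
 * slice/subslice/EU hierarchy -- enabled slices contain subslices, each
 * enabled subslice contains execution units, and eu_per_subslice records
 * the largest per-subslice EU count observed. i915_print_sseu_info()
 * below prints both the device's available configuration and the current
 * (power-gated) runtime state.
 */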
static void
i915_print_sseu_info(struct seq_file *m, bool is_available_info,
		     const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	seq_printf(m, "  %s Subslice Mask: %04x\n", type,
		   sseu->subslice_mask);
	seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
		   hweight8(sseu->subslice_mask));
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (IS_GEN9(dev_priv)) {
		gen9_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 10) {
		gen10_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(i915);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_uncore_forcewake_user_put(i915);
	intel_runtime_pm_put(i915);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
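
/*
 * Illustrative i915_hpd_storm_ctl usage (paths assume debugfs at
 * /sys/kernel/debug, DRM minor 0):
 *
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl      # set threshold
 *   echo 0 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl      # disable detection
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl  # default threshold
 *
 * The write handler below also clears the per-pin storm statistics so
 * that a new threshold does not immediately retrigger a storm.
 */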
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
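
/*
 * i915_drrs_ctl below is a write-only knob (note the NULL getter in its
 * DEFINE_SIMPLE_ATTRIBUTE): a non-zero write manually enables DRRS on
 * every active eDP pipe that supports it, and 0 disables it again, e.g.
 * (illustrative path):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */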
"en" : "dis", val); 4630 4631 intel_dp = enc_to_intel_dp(&encoder->base); 4632 if (val) 4633 intel_edp_drrs_enable(intel_dp, 4634 intel_crtc->config); 4635 else 4636 intel_edp_drrs_disable(intel_dp, 4637 intel_crtc->config); 4638 } 4639 } 4640 drm_modeset_unlock_all(dev); 4641 4642 return 0; 4643 } 4644 4645 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n"); 4646 4647 static const struct drm_info_list i915_debugfs_list[] = { 4648 {"i915_capabilities", i915_capabilities, 0}, 4649 {"i915_gem_objects", i915_gem_object_info, 0}, 4650 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 4651 {"i915_gem_stolen", i915_gem_stolen_list_info }, 4652 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 4653 {"i915_gem_interrupt", i915_interrupt_info, 0}, 4654 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, 4655 {"i915_guc_info", i915_guc_info, 0}, 4656 {"i915_guc_load_status", i915_guc_load_status_info, 0}, 4657 {"i915_guc_log_dump", i915_guc_log_dump, 0}, 4658 {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1}, 4659 {"i915_guc_stage_pool", i915_guc_stage_pool, 0}, 4660 {"i915_huc_load_status", i915_huc_load_status_info, 0}, 4661 {"i915_frequency_info", i915_frequency_info, 0}, 4662 {"i915_hangcheck_info", i915_hangcheck_info, 0}, 4663 {"i915_reset_info", i915_reset_info, 0}, 4664 {"i915_drpc_info", i915_drpc_info, 0}, 4665 {"i915_emon_status", i915_emon_status, 0}, 4666 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 4667 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 4668 {"i915_fbc_status", i915_fbc_status, 0}, 4669 {"i915_ips_status", i915_ips_status, 0}, 4670 {"i915_sr_status", i915_sr_status, 0}, 4671 {"i915_opregion", i915_opregion, 0}, 4672 {"i915_vbt", i915_vbt, 0}, 4673 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 4674 {"i915_context_status", i915_context_status, 0}, 4675 {"i915_forcewake_domains", i915_forcewake_domains, 0}, 4676 {"i915_swizzle_info", i915_swizzle_info, 0}, 4677 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 4678 {"i915_llc", i915_llc, 0}, 4679 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 4680 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 4681 {"i915_energy_uJ", i915_energy_uJ, 0}, 4682 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 4683 {"i915_power_domain_info", i915_power_domain_info, 0}, 4684 {"i915_dmc_info", i915_dmc_info, 0}, 4685 {"i915_display_info", i915_display_info, 0}, 4686 {"i915_engine_info", i915_engine_info, 0}, 4687 {"i915_shrinker_info", i915_shrinker_info, 0}, 4688 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 4689 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 4690 {"i915_wa_registers", i915_wa_registers, 0}, 4691 {"i915_ddb_info", i915_ddb_info, 0}, 4692 {"i915_sseu_status", i915_sseu_status, 0}, 4693 {"i915_drrs_status", i915_drrs_status, 0}, 4694 {"i915_rps_boost_info", i915_rps_boost_info, 0}, 4695 }; 4696 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 4697 4698 static const struct i915_debugfs_files { 4699 const char *name; 4700 const struct file_operations *fops; 4701 } i915_debugfs_files[] = { 4702 {"i915_wedged", &i915_wedged_fops}, 4703 {"i915_max_freq", &i915_max_freq_fops}, 4704 {"i915_min_freq", &i915_min_freq_fops}, 4705 {"i915_cache_sharing", &i915_cache_sharing_fops}, 4706 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 4707 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 4708 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 4709 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 4710 {"i915_error_state", &i915_error_state_fops}, 4711 {"i915_gpu_info", 
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_control", &i915_guc_log_control_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops}
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	struct dentry *ent;
	int ret, i;

	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
				  minor->debugfs_root, to_i915(minor->dev),
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	ret = intel_pipe_crc_create(minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ent = debugfs_create_file(i915_debugfs_files[i].name,
					  S_IRUGO | S_IWUSR,
					  minor->debugfs_root,
					  to_i915(minor->dev),
					  i915_debugfs_files[i].fops);
		if (!ent)
			return -ENOMEM;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
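
/*
 * i915_dpcd_show() below emits one "<offset>: <hex bytes>" line per block
 * via the %*ph format. Purely as an illustration (the bytes are made up),
 * the receiver capability block at DP_DPCD_REV might print as:
 *
 *   0000: 12 14 c4 81 01 01 01 81 02 02 06 00 00 00 00
 */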
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}

static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}

static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}

static int i915_panel_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_panel_show, inode->i_private);
}

static const struct file_operations i915_panel_fops = {
	.owner = THIS_MODULE,
	.open = i915_panel_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);

	return 0;
}