/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
#include "i915_guc_submission.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static __always_inline void seq_print_param(struct seq_file *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	kernel_param_lock(THIS_MODULE);
#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
	I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->userfault_count ? 'g' : ' ';
}

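/*
 * The helpers above and below encode object state as single characters
 * for describe_obj(): '*' active, 'p' pinned for display, X/Y tiling,
 * 'g' an outstanding GGTT mmap fault, 'M' a kernel mapping present.
 */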
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x - 2] = '\0';

		return buf;
	}
}

static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

static int obj_rank_by_stolen(const void *A, const void *B)
{
	const struct drm_i915_gem_object *a =
		*(const struct drm_i915_gem_object **)A;
	const struct drm_i915_gem_object *b =
		*(const struct drm_i915_gem_object **)B;

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, " ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}

struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}

static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;

		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long nobject, n;
	int count, ret;

	nobject = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret) {
		kvfree(objects);
		return ret;
	}

	count = 0;
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		objects[count++] = obj;
		if (count == nobject)
			break;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	total_obj_size = total_gtt_size = 0;
	for (n = 0; n < count; n++) {
		obj = objects[n];

		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	kvfree(objects);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, " ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, intel_engine_get_seqno(engine));

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);
}

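/*
 * i915_gem_seqno_info dumps, for every engine, the current HW seqno and
 * the rb-tree of "breadcrumb" waiters, i.e. which tasks are blocked on
 * which outstanding requests.
 */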
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		i915_ring_seqno_info(m, engine);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	for_each_engine(engine, dev_priv, id) {
		if (INTEL_GEN(dev_priv) >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);

	return 0;
}

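/*
 * Fence registers provide the detiling views used for CPU access through
 * the mappable aperture; "pin count" below counts users currently relying
 * on a register staying assigned to its object.
 */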
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;

	intel_runtime_pm_get(i915);
	gpu = i915_capture_gpu_state(i915);
	intel_runtime_pm_put(i915);
	if (!gpu)
		return -ENOMEM;

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_global_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");

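/*
 * i915_frequency_info dumps the GPU frequency/turbo (RPS) state.  The
 * relevant registers and field layouts differ widely between generations,
 * hence the per-platform branches below.
 */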
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9) {
			reqf >>= 23;
		} else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;

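		/*
		 * Decode the current actual GPU frequency (CAGF) from
		 * RPSTAT1; the field position differs per generation.
		 */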
		if (INTEL_GEN(dev_priv) >= 9)
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}

static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}

static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

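	/*
	 * Snapshot the render engine's INSTDONE as well, so the output can
	 * be compared against the hangcheck accumulator below to see
	 * whether the GPU is still making progress.
	 */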
	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   engine->timeline->inflight_seqnos);
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}

static int i915_reset_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s = %u\n", engine->name,
			   i915_reset_engine_count(error, engine));
	}

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   i915->uncore.user_forcewake.count);

	for_each_fw_domain(fw_domain, i915, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
	unsigned forcewake_count;
	int count = 0;

	forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	mutex_lock(&dev_priv->pcu_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->pcu_lock);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	seq_printf(m, "RC6 voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int err;

	intel_runtime_pm_get(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		err = vlv_drpc_info(m);
	else if (INTEL_GEN(dev_priv) >= 6)
		err = gen6_drpc_info(m);
	else
		err = ironlake_drpc_info(m);

	intel_runtime_pm_put(dev_priv);

	return err;
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_FBC(dev_priv)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->fbc.lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n",
			   dev_priv->fbc.no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&dev_priv->fbc.lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_IPS(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_LLC(dev_priv)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		/* Convert GT frequency to 50 MHz units */
min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER; 1825 max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER; 1826 } else { 1827 min_gpu_freq = rps->min_freq_softlimit; 1828 max_gpu_freq = rps->max_freq_softlimit; 1829 } 1830 1831 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1832 1833 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { 1834 ia_freq = gpu_freq; 1835 sandybridge_pcode_read(dev_priv, 1836 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1837 &ia_freq); 1838 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1839 intel_gpu_freq(dev_priv, (gpu_freq * 1840 (IS_GEN9_BC(dev_priv) || 1841 IS_CANNONLAKE(dev_priv) ? 1842 GEN9_FREQ_SCALER : 1))), 1843 ((ia_freq >> 0) & 0xff) * 100, 1844 ((ia_freq >> 8) & 0xff) * 100); 1845 } 1846 1847 mutex_unlock(&dev_priv->pcu_lock); 1848 1849 out: 1850 intel_runtime_pm_put(dev_priv); 1851 return ret; 1852 } 1853 1854 static int i915_opregion(struct seq_file *m, void *unused) 1855 { 1856 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1857 struct drm_device *dev = &dev_priv->drm; 1858 struct intel_opregion *opregion = &dev_priv->opregion; 1859 int ret; 1860 1861 ret = mutex_lock_interruptible(&dev->struct_mutex); 1862 if (ret) 1863 goto out; 1864 1865 if (opregion->header) 1866 seq_write(m, opregion->header, OPREGION_SIZE); 1867 1868 mutex_unlock(&dev->struct_mutex); 1869 1870 out: 1871 return 0; 1872 } 1873 1874 static int i915_vbt(struct seq_file *m, void *unused) 1875 { 1876 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; 1877 1878 if (opregion->vbt) 1879 seq_write(m, opregion->vbt, opregion->vbt_size); 1880 1881 return 0; 1882 } 1883 1884 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1885 { 1886 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1887 struct drm_device *dev = &dev_priv->drm; 1888 struct intel_framebuffer *fbdev_fb = NULL; 1889 struct drm_framebuffer *drm_fb; 1890 int ret; 1891 1892 ret = mutex_lock_interruptible(&dev->struct_mutex); 1893 if (ret) 1894 return ret; 1895 1896 #ifdef CONFIG_DRM_FBDEV_EMULATION 1897 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) { 1898 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb); 1899 1900 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1901 fbdev_fb->base.width, 1902 fbdev_fb->base.height, 1903 fbdev_fb->base.format->depth, 1904 fbdev_fb->base.format->cpp[0] * 8, 1905 fbdev_fb->base.modifier, 1906 drm_framebuffer_read_refcount(&fbdev_fb->base)); 1907 describe_obj(m, fbdev_fb->obj); 1908 seq_putc(m, '\n'); 1909 } 1910 #endif 1911 1912 mutex_lock(&dev->mode_config.fb_lock); 1913 drm_for_each_fb(drm_fb, dev) { 1914 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 1915 if (fb == fbdev_fb) 1916 continue; 1917 1918 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1919 fb->base.width, 1920 fb->base.height, 1921 fb->base.format->depth, 1922 fb->base.format->cpp[0] * 8, 1923 fb->base.modifier, 1924 drm_framebuffer_read_refcount(&fb->base)); 1925 describe_obj(m, fb->obj); 1926 seq_putc(m, '\n'); 1927 } 1928 mutex_unlock(&dev->mode_config.fb_lock); 1929 mutex_unlock(&dev->struct_mutex); 1930 1931 return 0; 1932 } 1933 1934 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) 1935 { 1936 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)", 1937 ring->space, ring->head, ring->tail); 1938 } 1939 1940 static int i915_context_status(struct seq_file *m, void 
*unused) 1941 { 1942 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1943 struct drm_device *dev = &dev_priv->drm; 1944 struct intel_engine_cs *engine; 1945 struct i915_gem_context *ctx; 1946 enum intel_engine_id id; 1947 int ret; 1948 1949 ret = mutex_lock_interruptible(&dev->struct_mutex); 1950 if (ret) 1951 return ret; 1952 1953 list_for_each_entry(ctx, &dev_priv->contexts.list, link) { 1954 seq_printf(m, "HW context %u ", ctx->hw_id); 1955 if (ctx->pid) { 1956 struct task_struct *task; 1957 1958 task = get_pid_task(ctx->pid, PIDTYPE_PID); 1959 if (task) { 1960 seq_printf(m, "(%s [%d]) ", 1961 task->comm, task->pid); 1962 put_task_struct(task); 1963 } 1964 } else if (IS_ERR(ctx->file_priv)) { 1965 seq_puts(m, "(deleted) "); 1966 } else { 1967 seq_puts(m, "(kernel) "); 1968 } 1969 1970 seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 1971 seq_putc(m, '\n'); 1972 1973 for_each_engine(engine, dev_priv, id) { 1974 struct intel_context *ce = &ctx->engine[engine->id]; 1975 1976 seq_printf(m, "%s: ", engine->name); 1977 seq_putc(m, ce->initialised ? 'I' : 'i'); 1978 if (ce->state) 1979 describe_obj(m, ce->state->obj); 1980 if (ce->ring) 1981 describe_ctx_ring(m, ce->ring); 1982 seq_putc(m, '\n'); 1983 } 1984 1985 seq_putc(m, '\n'); 1986 } 1987 1988 mutex_unlock(&dev->struct_mutex); 1989 1990 return 0; 1991 } 1992 1993 static void i915_dump_lrc_obj(struct seq_file *m, 1994 struct i915_gem_context *ctx, 1995 struct intel_engine_cs *engine) 1996 { 1997 struct i915_vma *vma = ctx->engine[engine->id].state; 1998 struct page *page; 1999 int j; 2000 2001 seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id); 2002 2003 if (!vma) { 2004 seq_puts(m, "\tFake context\n"); 2005 return; 2006 } 2007 2008 if (vma->flags & I915_VMA_GLOBAL_BIND) 2009 seq_printf(m, "\tBound in GGTT at 0x%08x\n", 2010 i915_ggtt_offset(vma)); 2011 2012 if (i915_gem_object_pin_pages(vma->obj)) { 2013 seq_puts(m, "\tFailed to get pages for context object\n\n"); 2014 return; 2015 } 2016 2017 page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN); 2018 if (page) { 2019 u32 *reg_state = kmap_atomic(page); 2020 2021 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { 2022 seq_printf(m, 2023 "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n", 2024 j * 4, 2025 reg_state[j], reg_state[j + 1], 2026 reg_state[j + 2], reg_state[j + 3]); 2027 } 2028 kunmap_atomic(reg_state); 2029 } 2030 2031 i915_gem_object_unpin_pages(vma->obj); 2032 seq_putc(m, '\n'); 2033 } 2034 2035 static int i915_dump_lrc(struct seq_file *m, void *unused) 2036 { 2037 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2038 struct drm_device *dev = &dev_priv->drm; 2039 struct intel_engine_cs *engine; 2040 struct i915_gem_context *ctx; 2041 enum intel_engine_id id; 2042 int ret; 2043 2044 if (!i915_modparams.enable_execlists) { 2045 seq_printf(m, "Logical Ring Contexts are disabled\n"); 2046 return 0; 2047 } 2048 2049 ret = mutex_lock_interruptible(&dev->struct_mutex); 2050 if (ret) 2051 return ret; 2052 2053 list_for_each_entry(ctx, &dev_priv->contexts.list, link) 2054 for_each_engine(engine, dev_priv, id) 2055 i915_dump_lrc_obj(m, ctx, engine); 2056 2057 mutex_unlock(&dev->struct_mutex); 2058 2059 return 0; 2060 } 2061 2062 static const char *swizzle_string(unsigned swizzle) 2063 { 2064 switch (swizzle) { 2065 case I915_BIT_6_SWIZZLE_NONE: 2066 return "none"; 2067 case I915_BIT_6_SWIZZLE_9: 2068 return "bit9"; 2069 case I915_BIT_6_SWIZZLE_9_10: 2070 return "bit9/bit10"; 2071 case I915_BIT_6_SWIZZLE_9_11: 2072 return "bit9/bit11"; 2073 case 
I915_BIT_6_SWIZZLE_9_10_11: 2074 return "bit9/bit10/bit11"; 2075 case I915_BIT_6_SWIZZLE_9_17: 2076 return "bit9/bit17"; 2077 case I915_BIT_6_SWIZZLE_9_10_17: 2078 return "bit9/bit10/bit17"; 2079 case I915_BIT_6_SWIZZLE_UNKNOWN: 2080 return "unknown"; 2081 } 2082 2083 return "bug"; 2084 } 2085 2086 static int i915_swizzle_info(struct seq_file *m, void *data) 2087 { 2088 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2089 2090 intel_runtime_pm_get(dev_priv); 2091 2092 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 2093 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 2094 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 2095 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 2096 2097 if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) { 2098 seq_printf(m, "DDC = 0x%08x\n", 2099 I915_READ(DCC)); 2100 seq_printf(m, "DDC2 = 0x%08x\n", 2101 I915_READ(DCC2)); 2102 seq_printf(m, "C0DRB3 = 0x%04x\n", 2103 I915_READ16(C0DRB3)); 2104 seq_printf(m, "C1DRB3 = 0x%04x\n", 2105 I915_READ16(C1DRB3)); 2106 } else if (INTEL_GEN(dev_priv) >= 6) { 2107 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 2108 I915_READ(MAD_DIMM_C0)); 2109 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 2110 I915_READ(MAD_DIMM_C1)); 2111 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 2112 I915_READ(MAD_DIMM_C2)); 2113 seq_printf(m, "TILECTL = 0x%08x\n", 2114 I915_READ(TILECTL)); 2115 if (INTEL_GEN(dev_priv) >= 8) 2116 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 2117 I915_READ(GAMTARBMODE)); 2118 else 2119 seq_printf(m, "ARB_MODE = 0x%08x\n", 2120 I915_READ(ARB_MODE)); 2121 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 2122 I915_READ(DISP_ARB_CTL)); 2123 } 2124 2125 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2126 seq_puts(m, "L-shaped memory detected\n"); 2127 2128 intel_runtime_pm_put(dev_priv); 2129 2130 return 0; 2131 } 2132 2133 static int per_file_ctx(int id, void *ptr, void *data) 2134 { 2135 struct i915_gem_context *ctx = ptr; 2136 struct seq_file *m = data; 2137 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2138 2139 if (!ppgtt) { 2140 seq_printf(m, " no ppgtt for context %d\n", 2141 ctx->user_handle); 2142 return 0; 2143 } 2144 2145 if (i915_gem_context_is_default(ctx)) 2146 seq_puts(m, " default context:\n"); 2147 else 2148 seq_printf(m, " context %d:\n", ctx->user_handle); 2149 ppgtt->debug_dump(ppgtt, m); 2150 2151 return 0; 2152 } 2153 2154 static void gen8_ppgtt_info(struct seq_file *m, 2155 struct drm_i915_private *dev_priv) 2156 { 2157 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2158 struct intel_engine_cs *engine; 2159 enum intel_engine_id id; 2160 int i; 2161 2162 if (!ppgtt) 2163 return; 2164 2165 for_each_engine(engine, dev_priv, id) { 2166 seq_printf(m, "%s\n", engine->name); 2167 for (i = 0; i < 4; i++) { 2168 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i)); 2169 pdp <<= 32; 2170 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i)); 2171 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2172 } 2173 } 2174 } 2175 2176 static void gen6_ppgtt_info(struct seq_file *m, 2177 struct drm_i915_private *dev_priv) 2178 { 2179 struct intel_engine_cs *engine; 2180 enum intel_engine_id id; 2181 2182 if (IS_GEN6(dev_priv)) 2183 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2184 2185 for_each_engine(engine, dev_priv, id) { 2186 seq_printf(m, "%s\n", engine->name); 2187 if (IS_GEN7(dev_priv)) 2188 seq_printf(m, "GFX_MODE: 0x%08x\n", 2189 I915_READ(RING_MODE_GEN7(engine))); 2190 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", 2191 I915_READ(RING_PP_DIR_BASE(engine))); 2192 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", 2193 
I915_READ(RING_PP_DIR_BASE_READ(engine))); 2194 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", 2195 I915_READ(RING_PP_DIR_DCLV(engine))); 2196 } 2197 if (dev_priv->mm.aliasing_ppgtt) { 2198 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2199 2200 seq_puts(m, "aliasing PPGTT:\n"); 2201 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); 2202 2203 ppgtt->debug_dump(ppgtt, m); 2204 } 2205 2206 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2207 } 2208 2209 static int i915_ppgtt_info(struct seq_file *m, void *data) 2210 { 2211 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2212 struct drm_device *dev = &dev_priv->drm; 2213 struct drm_file *file; 2214 int ret; 2215 2216 mutex_lock(&dev->filelist_mutex); 2217 ret = mutex_lock_interruptible(&dev->struct_mutex); 2218 if (ret) 2219 goto out_unlock; 2220 2221 intel_runtime_pm_get(dev_priv); 2222 2223 if (INTEL_GEN(dev_priv) >= 8) 2224 gen8_ppgtt_info(m, dev_priv); 2225 else if (INTEL_GEN(dev_priv) >= 6) 2226 gen6_ppgtt_info(m, dev_priv); 2227 2228 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2229 struct drm_i915_file_private *file_priv = file->driver_priv; 2230 struct task_struct *task; 2231 2232 task = get_pid_task(file->pid, PIDTYPE_PID); 2233 if (!task) { 2234 ret = -ESRCH; 2235 goto out_rpm; 2236 } 2237 seq_printf(m, "\nproc: %s\n", task->comm); 2238 put_task_struct(task); 2239 idr_for_each(&file_priv->context_idr, per_file_ctx, 2240 (void *)(unsigned long)m); 2241 } 2242 2243 out_rpm: 2244 intel_runtime_pm_put(dev_priv); 2245 mutex_unlock(&dev->struct_mutex); 2246 out_unlock: 2247 mutex_unlock(&dev->filelist_mutex); 2248 return ret; 2249 } 2250 2251 static int count_irq_waiters(struct drm_i915_private *i915) 2252 { 2253 struct intel_engine_cs *engine; 2254 enum intel_engine_id id; 2255 int count = 0; 2256 2257 for_each_engine(engine, i915, id) 2258 count += intel_engine_has_waiter(engine); 2259 2260 return count; 2261 } 2262 2263 static const char *rps_power_to_str(unsigned int power) 2264 { 2265 static const char * const strings[] = { 2266 [LOW_POWER] = "low power", 2267 [BETWEEN] = "mixed", 2268 [HIGH_POWER] = "high power", 2269 }; 2270 2271 if (power >= ARRAY_SIZE(strings) || !strings[power]) 2272 return "unknown"; 2273 2274 return strings[power]; 2275 } 2276 2277 static int i915_rps_boost_info(struct seq_file *m, void *data) 2278 { 2279 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2280 struct drm_device *dev = &dev_priv->drm; 2281 struct intel_rps *rps = &dev_priv->gt_pm.rps; 2282 struct drm_file *file; 2283 2284 seq_printf(m, "RPS enabled? %d\n", rps->enabled); 2285 seq_printf(m, "GPU busy? %s [%d requests]\n", 2286 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests); 2287 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); 2288 seq_printf(m, "Boosts outstanding? 
%d\n", 2289 atomic_read(&rps->num_waiters)); 2290 seq_printf(m, "Frequency requested %d\n", 2291 intel_gpu_freq(dev_priv, rps->cur_freq)); 2292 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2293 intel_gpu_freq(dev_priv, rps->min_freq), 2294 intel_gpu_freq(dev_priv, rps->min_freq_softlimit), 2295 intel_gpu_freq(dev_priv, rps->max_freq_softlimit), 2296 intel_gpu_freq(dev_priv, rps->max_freq)); 2297 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", 2298 intel_gpu_freq(dev_priv, rps->idle_freq), 2299 intel_gpu_freq(dev_priv, rps->efficient_freq), 2300 intel_gpu_freq(dev_priv, rps->boost_freq)); 2301 2302 mutex_lock(&dev->filelist_mutex); 2303 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2304 struct drm_i915_file_private *file_priv = file->driver_priv; 2305 struct task_struct *task; 2306 2307 rcu_read_lock(); 2308 task = pid_task(file->pid, PIDTYPE_PID); 2309 seq_printf(m, "%s [%d]: %d boosts\n", 2310 task ? task->comm : "<unknown>", 2311 task ? task->pid : -1, 2312 atomic_read(&file_priv->rps_client.boosts)); 2313 rcu_read_unlock(); 2314 } 2315 seq_printf(m, "Kernel (anonymous) boosts: %d\n", 2316 atomic_read(&rps->boosts)); 2317 mutex_unlock(&dev->filelist_mutex); 2318 2319 if (INTEL_GEN(dev_priv) >= 6 && 2320 rps->enabled && 2321 dev_priv->gt.active_requests) { 2322 u32 rpup, rpupei; 2323 u32 rpdown, rpdownei; 2324 2325 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2326 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; 2327 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; 2328 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; 2329 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; 2330 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2331 2332 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", 2333 rps_power_to_str(rps->power)); 2334 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", 2335 rpup && rpupei ? 100 * rpup / rpupei : 0, 2336 rps->up_threshold); 2337 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", 2338 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0, 2339 rps->down_threshold); 2340 } else { 2341 seq_puts(m, "\nRPS Autotuning inactive\n"); 2342 } 2343 2344 return 0; 2345 } 2346 2347 static int i915_llc(struct seq_file *m, void *data) 2348 { 2349 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2350 const bool edram = INTEL_GEN(dev_priv) > 8; 2351 2352 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv))); 2353 seq_printf(m, "%s: %lluMB\n", edram ? 
"eDRAM" : "eLLC", 2354 intel_uncore_edram_size(dev_priv)/1024/1024); 2355 2356 return 0; 2357 } 2358 2359 static int i915_huc_load_status_info(struct seq_file *m, void *data) 2360 { 2361 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2362 struct drm_printer p; 2363 2364 if (!HAS_HUC_UCODE(dev_priv)) 2365 return 0; 2366 2367 p = drm_seq_file_printer(m); 2368 intel_uc_fw_dump(&dev_priv->huc.fw, &p); 2369 2370 intel_runtime_pm_get(dev_priv); 2371 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); 2372 intel_runtime_pm_put(dev_priv); 2373 2374 return 0; 2375 } 2376 2377 static int i915_guc_load_status_info(struct seq_file *m, void *data) 2378 { 2379 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2380 struct drm_printer p; 2381 u32 tmp, i; 2382 2383 if (!HAS_GUC_UCODE(dev_priv)) 2384 return 0; 2385 2386 p = drm_seq_file_printer(m); 2387 intel_uc_fw_dump(&dev_priv->guc.fw, &p); 2388 2389 intel_runtime_pm_get(dev_priv); 2390 2391 tmp = I915_READ(GUC_STATUS); 2392 2393 seq_printf(m, "\nGuC status 0x%08x:\n", tmp); 2394 seq_printf(m, "\tBootrom status = 0x%x\n", 2395 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); 2396 seq_printf(m, "\tuKernel status = 0x%x\n", 2397 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); 2398 seq_printf(m, "\tMIA Core status = 0x%x\n", 2399 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); 2400 seq_puts(m, "\nScratch registers:\n"); 2401 for (i = 0; i < 16; i++) 2402 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); 2403 2404 intel_runtime_pm_put(dev_priv); 2405 2406 return 0; 2407 } 2408 2409 static void i915_guc_log_info(struct seq_file *m, 2410 struct drm_i915_private *dev_priv) 2411 { 2412 struct intel_guc *guc = &dev_priv->guc; 2413 2414 seq_puts(m, "\nGuC logging stats:\n"); 2415 2416 seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n", 2417 guc->log.flush_count[GUC_ISR_LOG_BUFFER], 2418 guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]); 2419 2420 seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n", 2421 guc->log.flush_count[GUC_DPC_LOG_BUFFER], 2422 guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]); 2423 2424 seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n", 2425 guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER], 2426 guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]); 2427 2428 seq_printf(m, "\tTotal flush interrupt count: %u\n", 2429 guc->log.flush_interrupt_count); 2430 2431 seq_printf(m, "\tCapture miss count: %u\n", 2432 guc->log.capture_miss_count); 2433 } 2434 2435 static void i915_guc_client_info(struct seq_file *m, 2436 struct drm_i915_private *dev_priv, 2437 struct i915_guc_client *client) 2438 { 2439 struct intel_engine_cs *engine; 2440 enum intel_engine_id id; 2441 uint64_t tot = 0; 2442 2443 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", 2444 client->priority, client->stage_id, client->proc_desc_offset); 2445 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n", 2446 client->doorbell_id, client->doorbell_offset); 2447 2448 for_each_engine(engine, dev_priv, id) { 2449 u64 submissions = client->submissions[id]; 2450 tot += submissions; 2451 seq_printf(m, "\tSubmissions: %llu %s\n", 2452 submissions, engine->name); 2453 } 2454 seq_printf(m, "\tTotal: %llu\n", tot); 2455 } 2456 2457 static bool check_guc_submission(struct seq_file *m) 2458 { 2459 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2460 const struct intel_guc *guc = &dev_priv->guc; 2461 2462 if (!guc->execbuf_client) { 2463 seq_printf(m, "GuC submission %s\n", 2464 
			   HAS_GUC_SCHED(dev_priv) ?
			   "disabled" :
			   "not supported");
		return false;
	}

	return true;
}

static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!check_guc_submission(m))
		return 0;

	seq_printf(m, "Doorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);

	i915_guc_log_info(m, dev_priv);

	/* Add more as required ... */

	return 0;
}

static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct i915_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!check_guc_submission(m))
		return 0;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}

static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data inaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x
0x%08x 0x%08x\n", 2575 *(log + i), *(log + i + 1), 2576 *(log + i + 2), *(log + i + 3)); 2577 2578 seq_putc(m, '\n'); 2579 2580 i915_gem_object_unpin_map(obj); 2581 2582 return 0; 2583 } 2584 2585 static int i915_guc_log_control_get(void *data, u64 *val) 2586 { 2587 struct drm_i915_private *dev_priv = data; 2588 2589 if (!dev_priv->guc.log.vma) 2590 return -EINVAL; 2591 2592 *val = i915_modparams.guc_log_level; 2593 2594 return 0; 2595 } 2596 2597 static int i915_guc_log_control_set(void *data, u64 val) 2598 { 2599 struct drm_i915_private *dev_priv = data; 2600 int ret; 2601 2602 if (!dev_priv->guc.log.vma) 2603 return -EINVAL; 2604 2605 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); 2606 if (ret) 2607 return ret; 2608 2609 intel_runtime_pm_get(dev_priv); 2610 ret = i915_guc_log_control(dev_priv, val); 2611 intel_runtime_pm_put(dev_priv); 2612 2613 mutex_unlock(&dev_priv->drm.struct_mutex); 2614 return ret; 2615 } 2616 2617 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops, 2618 i915_guc_log_control_get, i915_guc_log_control_set, 2619 "%lld\n"); 2620 2621 static const char *psr2_live_status(u32 val) 2622 { 2623 static const char * const live_status[] = { 2624 "IDLE", 2625 "CAPTURE", 2626 "CAPTURE_FS", 2627 "SLEEP", 2628 "BUFON_FW", 2629 "ML_UP", 2630 "SU_STANDBY", 2631 "FAST_SLEEP", 2632 "DEEP_SLEEP", 2633 "BUF_ON", 2634 "TG_ON" 2635 }; 2636 2637 val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT; 2638 if (val < ARRAY_SIZE(live_status)) 2639 return live_status[val]; 2640 2641 return "unknown"; 2642 } 2643 2644 static int i915_edp_psr_status(struct seq_file *m, void *data) 2645 { 2646 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2647 u32 psrperf = 0; 2648 u32 stat[3]; 2649 enum pipe pipe; 2650 bool enabled = false; 2651 2652 if (!HAS_PSR(dev_priv)) { 2653 seq_puts(m, "PSR not supported\n"); 2654 return 0; 2655 } 2656 2657 intel_runtime_pm_get(dev_priv); 2658 2659 mutex_lock(&dev_priv->psr.lock); 2660 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 2661 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 2662 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2663 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2664 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2665 dev_priv->psr.busy_frontbuffer_bits); 2666 seq_printf(m, "Re-enable work scheduled: %s\n", 2667 yesno(work_busy(&dev_priv->psr.work.work))); 2668 2669 if (HAS_DDI(dev_priv)) { 2670 if (dev_priv->psr.psr2_support) 2671 enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; 2672 else 2673 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 2674 } else { 2675 for_each_pipe(dev_priv, pipe) { 2676 enum transcoder cpu_transcoder = 2677 intel_pipe_to_cpu_transcoder(dev_priv, pipe); 2678 enum intel_display_power_domain power_domain; 2679 2680 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 2681 if (!intel_display_power_get_if_enabled(dev_priv, 2682 power_domain)) 2683 continue; 2684 2685 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2686 VLV_EDP_PSR_CURR_STATE_MASK; 2687 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2688 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2689 enabled = true; 2690 2691 intel_display_power_put(dev_priv, power_domain); 2692 } 2693 } 2694 2695 seq_printf(m, "Main link in standby mode: %s\n", 2696 yesno(dev_priv->psr.link_standby)); 2697 2698 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); 2699 2700 if (!HAS_DDI(dev_priv)) 2701 for_each_pipe(dev_priv, pipe) { 2702 if 
		    ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
		     (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no performance counter of any kind.
	 * On SKL+ the perf counter is reset to 0 every time a DC state is
	 * entered.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	if (dev_priv->psr.psr2_support) {
		u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);

		seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
			   psr2, psr2_live_status(psr2));
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_crtc *crtc;

		if (!connector->base.state->best_encoder)
			continue;

		crtc = connector->base.state->crtc;
		if (!crtc->state->active)
			continue;

		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_unlock_all(dev);
	return ret;
}

static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Bits 12:8 of MSR_RAPL_POWER_UNIT give the energy unit as 1/2^ESU J */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}

static int i915_power_domain_info(struct
seq_file *m, void *unused) 2825 { 2826 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2827 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2828 int i; 2829 2830 mutex_lock(&power_domains->lock); 2831 2832 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2833 for (i = 0; i < power_domains->power_well_count; i++) { 2834 struct i915_power_well *power_well; 2835 enum intel_display_power_domain power_domain; 2836 2837 power_well = &power_domains->power_wells[i]; 2838 seq_printf(m, "%-25s %d\n", power_well->name, 2839 power_well->count); 2840 2841 for_each_power_domain(power_domain, power_well->domains) 2842 seq_printf(m, " %-23s %d\n", 2843 intel_display_power_domain_str(power_domain), 2844 power_domains->domain_use_count[power_domain]); 2845 } 2846 2847 mutex_unlock(&power_domains->lock); 2848 2849 return 0; 2850 } 2851 2852 static int i915_dmc_info(struct seq_file *m, void *unused) 2853 { 2854 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2855 struct intel_csr *csr; 2856 2857 if (!HAS_CSR(dev_priv)) { 2858 seq_puts(m, "not supported\n"); 2859 return 0; 2860 } 2861 2862 csr = &dev_priv->csr; 2863 2864 intel_runtime_pm_get(dev_priv); 2865 2866 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); 2867 seq_printf(m, "path: %s\n", csr->fw_path); 2868 2869 if (!csr->dmc_payload) 2870 goto out; 2871 2872 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 2873 CSR_VERSION_MINOR(csr->version)); 2874 2875 if (IS_KABYLAKE(dev_priv) || 2876 (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) { 2877 seq_printf(m, "DC3 -> DC5 count: %d\n", 2878 I915_READ(SKL_CSR_DC3_DC5_COUNT)); 2879 seq_printf(m, "DC5 -> DC6 count: %d\n", 2880 I915_READ(SKL_CSR_DC5_DC6_COUNT)); 2881 } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) { 2882 seq_printf(m, "DC3 -> DC5 count: %d\n", 2883 I915_READ(BXT_CSR_DC3_DC5_COUNT)); 2884 } 2885 2886 out: 2887 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); 2888 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); 2889 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); 2890 2891 intel_runtime_pm_put(dev_priv); 2892 2893 return 0; 2894 } 2895 2896 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2897 struct drm_display_mode *mode) 2898 { 2899 int i; 2900 2901 for (i = 0; i < tabs; i++) 2902 seq_putc(m, '\t'); 2903 2904 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2905 mode->base.id, mode->name, 2906 mode->vrefresh, mode->clock, 2907 mode->hdisplay, mode->hsync_start, 2908 mode->hsync_end, mode->htotal, 2909 mode->vdisplay, mode->vsync_start, 2910 mode->vsync_end, mode->vtotal, 2911 mode->type, mode->flags); 2912 } 2913 2914 static void intel_encoder_info(struct seq_file *m, 2915 struct intel_crtc *intel_crtc, 2916 struct intel_encoder *intel_encoder) 2917 { 2918 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2919 struct drm_device *dev = &dev_priv->drm; 2920 struct drm_crtc *crtc = &intel_crtc->base; 2921 struct intel_connector *intel_connector; 2922 struct drm_encoder *encoder; 2923 2924 encoder = &intel_encoder->base; 2925 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2926 encoder->base.id, encoder->name); 2927 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2928 struct drm_connector *connector = &intel_connector->base; 2929 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2930 
connector->base.id, 2931 connector->name, 2932 drm_get_connector_status_name(connector->status)); 2933 if (connector->status == connector_status_connected) { 2934 struct drm_display_mode *mode = &crtc->mode; 2935 seq_printf(m, ", mode:\n"); 2936 intel_seq_print_mode(m, 2, mode); 2937 } else { 2938 seq_putc(m, '\n'); 2939 } 2940 } 2941 } 2942 2943 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2944 { 2945 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2946 struct drm_device *dev = &dev_priv->drm; 2947 struct drm_crtc *crtc = &intel_crtc->base; 2948 struct intel_encoder *intel_encoder; 2949 struct drm_plane_state *plane_state = crtc->primary->state; 2950 struct drm_framebuffer *fb = plane_state->fb; 2951 2952 if (fb) 2953 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2954 fb->base.id, plane_state->src_x >> 16, 2955 plane_state->src_y >> 16, fb->width, fb->height); 2956 else 2957 seq_puts(m, "\tprimary plane disabled\n"); 2958 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2959 intel_encoder_info(m, intel_crtc, intel_encoder); 2960 } 2961 2962 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2963 { 2964 struct drm_display_mode *mode = panel->fixed_mode; 2965 2966 seq_printf(m, "\tfixed mode:\n"); 2967 intel_seq_print_mode(m, 2, mode); 2968 } 2969 2970 static void intel_dp_info(struct seq_file *m, 2971 struct intel_connector *intel_connector) 2972 { 2973 struct intel_encoder *intel_encoder = intel_connector->encoder; 2974 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2975 2976 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2977 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 2978 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) 2979 intel_panel_info(m, &intel_connector->panel); 2980 2981 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, 2982 &intel_dp->aux); 2983 } 2984 2985 static void intel_dp_mst_info(struct seq_file *m, 2986 struct intel_connector *intel_connector) 2987 { 2988 struct intel_encoder *intel_encoder = intel_connector->encoder; 2989 struct intel_dp_mst_encoder *intel_mst = 2990 enc_to_mst(&intel_encoder->base); 2991 struct intel_digital_port *intel_dig_port = intel_mst->primary; 2992 struct intel_dp *intel_dp = &intel_dig_port->dp; 2993 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, 2994 intel_connector->port); 2995 2996 seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); 2997 } 2998 2999 static void intel_hdmi_info(struct seq_file *m, 3000 struct intel_connector *intel_connector) 3001 { 3002 struct intel_encoder *intel_encoder = intel_connector->encoder; 3003 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 3004 3005 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); 3006 } 3007 3008 static void intel_lvds_info(struct seq_file *m, 3009 struct intel_connector *intel_connector) 3010 { 3011 intel_panel_info(m, &intel_connector->panel); 3012 } 3013 3014 static void intel_connector_info(struct seq_file *m, 3015 struct drm_connector *connector) 3016 { 3017 struct intel_connector *intel_connector = to_intel_connector(connector); 3018 struct intel_encoder *intel_encoder = intel_connector->encoder; 3019 struct drm_display_mode *mode; 3020 3021 seq_printf(m, "connector %d: type %s, status: %s\n", 3022 connector->base.id, connector->name, 3023 drm_get_connector_status_name(connector->status)); 3024 if (connector->status == 
connector_status_connected) { 3025 seq_printf(m, "\tname: %s\n", connector->display_info.name); 3026 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 3027 connector->display_info.width_mm, 3028 connector->display_info.height_mm); 3029 seq_printf(m, "\tsubpixel order: %s\n", 3030 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 3031 seq_printf(m, "\tCEA rev: %d\n", 3032 connector->display_info.cea_rev); 3033 } 3034 3035 if (!intel_encoder) 3036 return; 3037 3038 switch (connector->connector_type) { 3039 case DRM_MODE_CONNECTOR_DisplayPort: 3040 case DRM_MODE_CONNECTOR_eDP: 3041 if (intel_encoder->type == INTEL_OUTPUT_DP_MST) 3042 intel_dp_mst_info(m, intel_connector); 3043 else 3044 intel_dp_info(m, intel_connector); 3045 break; 3046 case DRM_MODE_CONNECTOR_LVDS: 3047 if (intel_encoder->type == INTEL_OUTPUT_LVDS) 3048 intel_lvds_info(m, intel_connector); 3049 break; 3050 case DRM_MODE_CONNECTOR_HDMIA: 3051 if (intel_encoder->type == INTEL_OUTPUT_HDMI || 3052 intel_encoder->type == INTEL_OUTPUT_UNKNOWN) 3053 intel_hdmi_info(m, intel_connector); 3054 break; 3055 default: 3056 break; 3057 } 3058 3059 seq_printf(m, "\tmodes:\n"); 3060 list_for_each_entry(mode, &connector->modes, head) 3061 intel_seq_print_mode(m, 2, mode); 3062 } 3063 3064 static const char *plane_type(enum drm_plane_type type) 3065 { 3066 switch (type) { 3067 case DRM_PLANE_TYPE_OVERLAY: 3068 return "OVL"; 3069 case DRM_PLANE_TYPE_PRIMARY: 3070 return "PRI"; 3071 case DRM_PLANE_TYPE_CURSOR: 3072 return "CUR"; 3073 /* 3074 * Deliberately omitting default: to generate compiler warnings 3075 * when a new drm_plane_type gets added. 3076 */ 3077 } 3078 3079 return "unknown"; 3080 } 3081 3082 static const char *plane_rotation(unsigned int rotation) 3083 { 3084 static char buf[48]; 3085 /* 3086 * According to doc only one DRM_MODE_ROTATE_ is allowed but this 3087 * will print them all to visualize if the values are misused 3088 */ 3089 snprintf(buf, sizeof(buf), 3090 "%s%s%s%s%s%s(0x%08x)", 3091 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "", 3092 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "", 3093 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "", 3094 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "", 3095 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "", 3096 (rotation & DRM_MODE_REFLECT_Y) ? 
"FLIPY " : "", 3097 rotation); 3098 3099 return buf; 3100 } 3101 3102 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3103 { 3104 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3105 struct drm_device *dev = &dev_priv->drm; 3106 struct intel_plane *intel_plane; 3107 3108 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3109 struct drm_plane_state *state; 3110 struct drm_plane *plane = &intel_plane->base; 3111 struct drm_format_name_buf format_name; 3112 3113 if (!plane->state) { 3114 seq_puts(m, "plane->state is NULL!\n"); 3115 continue; 3116 } 3117 3118 state = plane->state; 3119 3120 if (state->fb) { 3121 drm_get_format_name(state->fb->format->format, 3122 &format_name); 3123 } else { 3124 sprintf(format_name.str, "N/A"); 3125 } 3126 3127 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", 3128 plane->base.id, 3129 plane_type(intel_plane->base.type), 3130 state->crtc_x, state->crtc_y, 3131 state->crtc_w, state->crtc_h, 3132 (state->src_x >> 16), 3133 ((state->src_x & 0xffff) * 15625) >> 10, 3134 (state->src_y >> 16), 3135 ((state->src_y & 0xffff) * 15625) >> 10, 3136 (state->src_w >> 16), 3137 ((state->src_w & 0xffff) * 15625) >> 10, 3138 (state->src_h >> 16), 3139 ((state->src_h & 0xffff) * 15625) >> 10, 3140 format_name.str, 3141 plane_rotation(state->rotation)); 3142 } 3143 } 3144 3145 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3146 { 3147 struct intel_crtc_state *pipe_config; 3148 int num_scalers = intel_crtc->num_scalers; 3149 int i; 3150 3151 pipe_config = to_intel_crtc_state(intel_crtc->base.state); 3152 3153 /* Not all platformas have a scaler */ 3154 if (num_scalers) { 3155 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 3156 num_scalers, 3157 pipe_config->scaler_state.scaler_users, 3158 pipe_config->scaler_state.scaler_id); 3159 3160 for (i = 0; i < num_scalers; i++) { 3161 struct intel_scaler *sc = 3162 &pipe_config->scaler_state.scalers[i]; 3163 3164 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 3165 i, yesno(sc->in_use), sc->mode); 3166 } 3167 seq_puts(m, "\n"); 3168 } else { 3169 seq_puts(m, "\tNo scalers available on this platform\n"); 3170 } 3171 } 3172 3173 static int i915_display_info(struct seq_file *m, void *unused) 3174 { 3175 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3176 struct drm_device *dev = &dev_priv->drm; 3177 struct intel_crtc *crtc; 3178 struct drm_connector *connector; 3179 struct drm_connector_list_iter conn_iter; 3180 3181 intel_runtime_pm_get(dev_priv); 3182 seq_printf(m, "CRTC info\n"); 3183 seq_printf(m, "---------\n"); 3184 for_each_intel_crtc(dev, crtc) { 3185 struct intel_crtc_state *pipe_config; 3186 3187 drm_modeset_lock(&crtc->base.mutex, NULL); 3188 pipe_config = to_intel_crtc_state(crtc->base.state); 3189 3190 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n", 3191 crtc->base.base.id, pipe_name(crtc->pipe), 3192 yesno(pipe_config->base.active), 3193 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 3194 yesno(pipe_config->dither), pipe_config->pipe_bpp); 3195 3196 if (pipe_config->base.active) { 3197 struct intel_plane *cursor = 3198 to_intel_plane(crtc->base.cursor); 3199 3200 intel_crtc_info(m, crtc); 3201 3202 seq_printf(m, "\tcursor visible? 
%s, position (%d, %d), size %dx%d, addr 0x%08x\n", 3203 yesno(cursor->base.state->visible), 3204 cursor->base.state->crtc_x, 3205 cursor->base.state->crtc_y, 3206 cursor->base.state->crtc_w, 3207 cursor->base.state->crtc_h, 3208 cursor->cursor.base); 3209 intel_scaler_info(m, crtc); 3210 intel_plane_info(m, crtc); 3211 } 3212 3213 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 3214 yesno(!crtc->cpu_fifo_underrun_disabled), 3215 yesno(!crtc->pch_fifo_underrun_disabled)); 3216 drm_modeset_unlock(&crtc->base.mutex); 3217 } 3218 3219 seq_printf(m, "\n"); 3220 seq_printf(m, "Connector info\n"); 3221 seq_printf(m, "--------------\n"); 3222 mutex_lock(&dev->mode_config.mutex); 3223 drm_connector_list_iter_begin(dev, &conn_iter); 3224 drm_for_each_connector_iter(connector, &conn_iter) 3225 intel_connector_info(m, connector); 3226 drm_connector_list_iter_end(&conn_iter); 3227 mutex_unlock(&dev->mode_config.mutex); 3228 3229 intel_runtime_pm_put(dev_priv); 3230 3231 return 0; 3232 } 3233 3234 static int i915_engine_info(struct seq_file *m, void *unused) 3235 { 3236 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3237 struct intel_engine_cs *engine; 3238 enum intel_engine_id id; 3239 struct drm_printer p; 3240 3241 intel_runtime_pm_get(dev_priv); 3242 3243 seq_printf(m, "GT awake? %s\n", 3244 yesno(dev_priv->gt.awake)); 3245 seq_printf(m, "Global active requests: %d\n", 3246 dev_priv->gt.active_requests); 3247 3248 p = drm_seq_file_printer(m); 3249 for_each_engine(engine, dev_priv, id) 3250 intel_engine_dump(engine, &p); 3251 3252 intel_runtime_pm_put(dev_priv); 3253 3254 return 0; 3255 } 3256 3257 static int i915_shrinker_info(struct seq_file *m, void *unused) 3258 { 3259 struct drm_i915_private *i915 = node_to_i915(m->private); 3260 3261 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks); 3262 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch); 3263 3264 return 0; 3265 } 3266 3267 static int i915_semaphore_status(struct seq_file *m, void *unused) 3268 { 3269 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3270 struct drm_device *dev = &dev_priv->drm; 3271 struct intel_engine_cs *engine; 3272 int num_rings = INTEL_INFO(dev_priv)->num_rings; 3273 enum intel_engine_id id; 3274 int j, ret; 3275 3276 if (!i915_modparams.semaphores) { 3277 seq_puts(m, "Semaphores are disabled\n"); 3278 return 0; 3279 } 3280 3281 ret = mutex_lock_interruptible(&dev->struct_mutex); 3282 if (ret) 3283 return ret; 3284 intel_runtime_pm_get(dev_priv); 3285 3286 if (IS_BROADWELL(dev_priv)) { 3287 struct page *page; 3288 uint64_t *seqno; 3289 3290 page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0); 3291 3292 seqno = (uint64_t *)kmap_atomic(page); 3293 for_each_engine(engine, dev_priv, id) { 3294 uint64_t offset; 3295 3296 seq_printf(m, "%s\n", engine->name); 3297 3298 seq_puts(m, " Last signal:"); 3299 for (j = 0; j < num_rings; j++) { 3300 offset = id * I915_NUM_ENGINES + j; 3301 seq_printf(m, "0x%08llx (0x%02llx) ", 3302 seqno[offset], offset * 8); 3303 } 3304 seq_putc(m, '\n'); 3305 3306 seq_puts(m, " Last wait: "); 3307 for (j = 0; j < num_rings; j++) { 3308 offset = id + (j * I915_NUM_ENGINES); 3309 seq_printf(m, "0x%08llx (0x%02llx) ", 3310 seqno[offset], offset * 8); 3311 } 3312 seq_putc(m, '\n'); 3313 3314 } 3315 kunmap_atomic(seqno); 3316 } else { 3317 seq_puts(m, " Last signal:"); 3318 for_each_engine(engine, dev_priv, id) 3319 for (j = 0; j < num_rings; j++) 3320 seq_printf(m, "0x%08x\n", 3321 I915_READ(engine->semaphore.mbox.signal[j])); 3322 seq_putc(m, '\n'); 
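
		/*
		 * Descriptive note on the indexing in the Broadwell branch
		 * above: the driver's global semaphore page is read as an
		 * I915_NUM_ENGINES x I915_NUM_ENGINES matrix of qwords, one
		 * slot per (signaller, waiter) engine pair. "Last signal"
		 * entries for engine id live at id * I915_NUM_ENGINES + j,
		 * "last wait" entries at the transposed index
		 * id + j * I915_NUM_ENGINES, and the value printed next to
		 * each seqno is the slot's byte offset (offset * 8). This
		 * legacy branch instead reads the per-ring mailbox registers
		 * directly.
		 */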
3323 } 3324 3325 intel_runtime_pm_put(dev_priv); 3326 mutex_unlock(&dev->struct_mutex); 3327 return 0; 3328 } 3329 3330 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 3331 { 3332 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3333 struct drm_device *dev = &dev_priv->drm; 3334 int i; 3335 3336 drm_modeset_lock_all(dev); 3337 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3338 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 3339 3340 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 3341 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", 3342 pll->state.crtc_mask, pll->active_mask, yesno(pll->on)); 3343 seq_printf(m, " tracked hardware state:\n"); 3344 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); 3345 seq_printf(m, " dpll_md: 0x%08x\n", 3346 pll->state.hw_state.dpll_md); 3347 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); 3348 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); 3349 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); 3350 } 3351 drm_modeset_unlock_all(dev); 3352 3353 return 0; 3354 } 3355 3356 static int i915_wa_registers(struct seq_file *m, void *unused) 3357 { 3358 int i; 3359 int ret; 3360 struct intel_engine_cs *engine; 3361 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3362 struct drm_device *dev = &dev_priv->drm; 3363 struct i915_workarounds *workarounds = &dev_priv->workarounds; 3364 enum intel_engine_id id; 3365 3366 ret = mutex_lock_interruptible(&dev->struct_mutex); 3367 if (ret) 3368 return ret; 3369 3370 intel_runtime_pm_get(dev_priv); 3371 3372 seq_printf(m, "Workarounds applied: %d\n", workarounds->count); 3373 for_each_engine(engine, dev_priv, id) 3374 seq_printf(m, "HW whitelist count for %s: %d\n", 3375 engine->name, workarounds->hw_whitelist_count[id]); 3376 for (i = 0; i < workarounds->count; ++i) { 3377 i915_reg_t addr; 3378 u32 mask, value, read; 3379 bool ok; 3380 3381 addr = workarounds->reg[i].addr; 3382 mask = workarounds->reg[i].mask; 3383 value = workarounds->reg[i].value; 3384 read = I915_READ(addr); 3385 ok = (value & mask) == (read & mask); 3386 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 3387 i915_mmio_reg_offset(addr), value, mask, read, ok ? 
"OK" : "FAIL"); 3388 } 3389 3390 intel_runtime_pm_put(dev_priv); 3391 mutex_unlock(&dev->struct_mutex); 3392 3393 return 0; 3394 } 3395 3396 static int i915_ipc_status_show(struct seq_file *m, void *data) 3397 { 3398 struct drm_i915_private *dev_priv = m->private; 3399 3400 seq_printf(m, "Isochronous Priority Control: %s\n", 3401 yesno(dev_priv->ipc_enabled)); 3402 return 0; 3403 } 3404 3405 static int i915_ipc_status_open(struct inode *inode, struct file *file) 3406 { 3407 struct drm_i915_private *dev_priv = inode->i_private; 3408 3409 if (!HAS_IPC(dev_priv)) 3410 return -ENODEV; 3411 3412 return single_open(file, i915_ipc_status_show, dev_priv); 3413 } 3414 3415 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, 3416 size_t len, loff_t *offp) 3417 { 3418 struct seq_file *m = file->private_data; 3419 struct drm_i915_private *dev_priv = m->private; 3420 int ret; 3421 bool enable; 3422 3423 ret = kstrtobool_from_user(ubuf, len, &enable); 3424 if (ret < 0) 3425 return ret; 3426 3427 intel_runtime_pm_get(dev_priv); 3428 if (!dev_priv->ipc_enabled && enable) 3429 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); 3430 dev_priv->wm.distrust_bios_wm = true; 3431 dev_priv->ipc_enabled = enable; 3432 intel_enable_ipc(dev_priv); 3433 intel_runtime_pm_put(dev_priv); 3434 3435 return len; 3436 } 3437 3438 static const struct file_operations i915_ipc_status_fops = { 3439 .owner = THIS_MODULE, 3440 .open = i915_ipc_status_open, 3441 .read = seq_read, 3442 .llseek = seq_lseek, 3443 .release = single_release, 3444 .write = i915_ipc_status_write 3445 }; 3446 3447 static int i915_ddb_info(struct seq_file *m, void *unused) 3448 { 3449 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3450 struct drm_device *dev = &dev_priv->drm; 3451 struct skl_ddb_allocation *ddb; 3452 struct skl_ddb_entry *entry; 3453 enum pipe pipe; 3454 int plane; 3455 3456 if (INTEL_GEN(dev_priv) < 9) 3457 return 0; 3458 3459 drm_modeset_lock_all(dev); 3460 3461 ddb = &dev_priv->wm.skl_hw.ddb; 3462 3463 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 3464 3465 for_each_pipe(dev_priv, pipe) { 3466 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 3467 3468 for_each_universal_plane(dev_priv, pipe, plane) { 3469 entry = &ddb->plane[pipe][plane]; 3470 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, 3471 entry->start, entry->end, 3472 skl_ddb_entry_size(entry)); 3473 } 3474 3475 entry = &ddb->plane[pipe][PLANE_CURSOR]; 3476 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3477 entry->end, skl_ddb_entry_size(entry)); 3478 } 3479 3480 drm_modeset_unlock_all(dev); 3481 3482 return 0; 3483 } 3484 3485 static void drrs_status_per_crtc(struct seq_file *m, 3486 struct drm_device *dev, 3487 struct intel_crtc *intel_crtc) 3488 { 3489 struct drm_i915_private *dev_priv = to_i915(dev); 3490 struct i915_drrs *drrs = &dev_priv->drrs; 3491 int vrefresh = 0; 3492 struct drm_connector *connector; 3493 struct drm_connector_list_iter conn_iter; 3494 3495 drm_connector_list_iter_begin(dev, &conn_iter); 3496 drm_for_each_connector_iter(connector, &conn_iter) { 3497 if (connector->state->crtc != &intel_crtc->base) 3498 continue; 3499 3500 seq_printf(m, "%s:\n", connector->name); 3501 } 3502 drm_connector_list_iter_end(&conn_iter); 3503 3504 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) 3505 seq_puts(m, "\tVBT: DRRS_type: Static"); 3506 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) 3507 seq_puts(m, "\tVBT: DRRS_type: Seamless"); 3508 else if 
		 (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported; print the VBT parameter */
		seq_puts(m, "\tDRRS Supported: No");
	}
	seq_puts(m, "\n");
}

static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}

static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;
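
	/*
	 * Usage sketch for this debugfs write handler (the path below is an
	 * assumption: default debugfs mount point and DRM minor 0). DP
	 * compliance tools arm the test handler with
	 *
	 *   echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
	 *
	 * and read the armed state back through the matching _show() below.
	 * Only a literal 1 arms compliance handling; any other integer
	 * disarms it, as enforced in the connector loop.
	 */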
3623 3624 dev = ((struct seq_file *)file->private_data)->private; 3625 3626 if (len == 0) 3627 return 0; 3628 3629 input_buffer = memdup_user_nul(ubuf, len); 3630 if (IS_ERR(input_buffer)) 3631 return PTR_ERR(input_buffer); 3632 3633 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); 3634 3635 drm_connector_list_iter_begin(dev, &conn_iter); 3636 drm_for_each_connector_iter(connector, &conn_iter) { 3637 struct intel_encoder *encoder; 3638 3639 if (connector->connector_type != 3640 DRM_MODE_CONNECTOR_DisplayPort) 3641 continue; 3642 3643 encoder = to_intel_encoder(connector->encoder); 3644 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3645 continue; 3646 3647 if (encoder && connector->status == connector_status_connected) { 3648 intel_dp = enc_to_intel_dp(&encoder->base); 3649 status = kstrtoint(input_buffer, 10, &val); 3650 if (status < 0) 3651 break; 3652 DRM_DEBUG_DRIVER("Got %d for test active\n", val); 3653 /* To prevent erroneous activation of the compliance 3654 * testing code, only accept an actual value of 1 here 3655 */ 3656 if (val == 1) 3657 intel_dp->compliance.test_active = 1; 3658 else 3659 intel_dp->compliance.test_active = 0; 3660 } 3661 } 3662 drm_connector_list_iter_end(&conn_iter); 3663 kfree(input_buffer); 3664 if (status < 0) 3665 return status; 3666 3667 *offp += len; 3668 return len; 3669 } 3670 3671 static int i915_displayport_test_active_show(struct seq_file *m, void *data) 3672 { 3673 struct drm_device *dev = m->private; 3674 struct drm_connector *connector; 3675 struct drm_connector_list_iter conn_iter; 3676 struct intel_dp *intel_dp; 3677 3678 drm_connector_list_iter_begin(dev, &conn_iter); 3679 drm_for_each_connector_iter(connector, &conn_iter) { 3680 struct intel_encoder *encoder; 3681 3682 if (connector->connector_type != 3683 DRM_MODE_CONNECTOR_DisplayPort) 3684 continue; 3685 3686 encoder = to_intel_encoder(connector->encoder); 3687 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3688 continue; 3689 3690 if (encoder && connector->status == connector_status_connected) { 3691 intel_dp = enc_to_intel_dp(&encoder->base); 3692 if (intel_dp->compliance.test_active) 3693 seq_puts(m, "1"); 3694 else 3695 seq_puts(m, "0"); 3696 } else 3697 seq_puts(m, "0"); 3698 } 3699 drm_connector_list_iter_end(&conn_iter); 3700 3701 return 0; 3702 } 3703 3704 static int i915_displayport_test_active_open(struct inode *inode, 3705 struct file *file) 3706 { 3707 struct drm_i915_private *dev_priv = inode->i_private; 3708 3709 return single_open(file, i915_displayport_test_active_show, 3710 &dev_priv->drm); 3711 } 3712 3713 static const struct file_operations i915_displayport_test_active_fops = { 3714 .owner = THIS_MODULE, 3715 .open = i915_displayport_test_active_open, 3716 .read = seq_read, 3717 .llseek = seq_lseek, 3718 .release = single_release, 3719 .write = i915_displayport_test_active_write 3720 }; 3721 3722 static int i915_displayport_test_data_show(struct seq_file *m, void *data) 3723 { 3724 struct drm_device *dev = m->private; 3725 struct drm_connector *connector; 3726 struct drm_connector_list_iter conn_iter; 3727 struct intel_dp *intel_dp; 3728 3729 drm_connector_list_iter_begin(dev, &conn_iter); 3730 drm_for_each_connector_iter(connector, &conn_iter) { 3731 struct intel_encoder *encoder; 3732 3733 if (connector->connector_type != 3734 DRM_MODE_CONNECTOR_DisplayPort) 3735 continue; 3736 3737 encoder = to_intel_encoder(connector->encoder); 3738 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3739 continue; 3740 3741 if (encoder && 

static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_data_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_data_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_type_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_type_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
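
/*
 * Watermark latencies are stored in different units depending on the
 * platform: 1 us units on gen9+/vlv/chv/g4x, and 0.5 us units for WM1+
 * elsewhere. wm_latency_show() below normalizes both to tenths of a
 * microsecond before printing, so e.g. a stored WM1 value of 3 on a
 * platform using 0.5 us units is shown as "WM1 3 (1.5 usec)".
 */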
usec)\n", 3861 level, wm[level], latency / 10, latency % 10); 3862 } 3863 3864 drm_modeset_unlock_all(dev); 3865 } 3866 3867 static int pri_wm_latency_show(struct seq_file *m, void *data) 3868 { 3869 struct drm_i915_private *dev_priv = m->private; 3870 const uint16_t *latencies; 3871 3872 if (INTEL_GEN(dev_priv) >= 9) 3873 latencies = dev_priv->wm.skl_latency; 3874 else 3875 latencies = dev_priv->wm.pri_latency; 3876 3877 wm_latency_show(m, latencies); 3878 3879 return 0; 3880 } 3881 3882 static int spr_wm_latency_show(struct seq_file *m, void *data) 3883 { 3884 struct drm_i915_private *dev_priv = m->private; 3885 const uint16_t *latencies; 3886 3887 if (INTEL_GEN(dev_priv) >= 9) 3888 latencies = dev_priv->wm.skl_latency; 3889 else 3890 latencies = dev_priv->wm.spr_latency; 3891 3892 wm_latency_show(m, latencies); 3893 3894 return 0; 3895 } 3896 3897 static int cur_wm_latency_show(struct seq_file *m, void *data) 3898 { 3899 struct drm_i915_private *dev_priv = m->private; 3900 const uint16_t *latencies; 3901 3902 if (INTEL_GEN(dev_priv) >= 9) 3903 latencies = dev_priv->wm.skl_latency; 3904 else 3905 latencies = dev_priv->wm.cur_latency; 3906 3907 wm_latency_show(m, latencies); 3908 3909 return 0; 3910 } 3911 3912 static int pri_wm_latency_open(struct inode *inode, struct file *file) 3913 { 3914 struct drm_i915_private *dev_priv = inode->i_private; 3915 3916 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 3917 return -ENODEV; 3918 3919 return single_open(file, pri_wm_latency_show, dev_priv); 3920 } 3921 3922 static int spr_wm_latency_open(struct inode *inode, struct file *file) 3923 { 3924 struct drm_i915_private *dev_priv = inode->i_private; 3925 3926 if (HAS_GMCH_DISPLAY(dev_priv)) 3927 return -ENODEV; 3928 3929 return single_open(file, spr_wm_latency_show, dev_priv); 3930 } 3931 3932 static int cur_wm_latency_open(struct inode *inode, struct file *file) 3933 { 3934 struct drm_i915_private *dev_priv = inode->i_private; 3935 3936 if (HAS_GMCH_DISPLAY(dev_priv)) 3937 return -ENODEV; 3938 3939 return single_open(file, cur_wm_latency_show, dev_priv); 3940 } 3941 3942 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 3943 size_t len, loff_t *offp, uint16_t wm[8]) 3944 { 3945 struct seq_file *m = file->private_data; 3946 struct drm_i915_private *dev_priv = m->private; 3947 struct drm_device *dev = &dev_priv->drm; 3948 uint16_t new[8] = { 0 }; 3949 int num_levels; 3950 int level; 3951 int ret; 3952 char tmp[32]; 3953 3954 if (IS_CHERRYVIEW(dev_priv)) 3955 num_levels = 3; 3956 else if (IS_VALLEYVIEW(dev_priv)) 3957 num_levels = 1; 3958 else if (IS_G4X(dev_priv)) 3959 num_levels = 3; 3960 else 3961 num_levels = ilk_wm_max_level(dev_priv) + 1; 3962 3963 if (len >= sizeof(tmp)) 3964 return -EINVAL; 3965 3966 if (copy_from_user(tmp, ubuf, len)) 3967 return -EFAULT; 3968 3969 tmp[len] = '\0'; 3970 3971 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 3972 &new[0], &new[1], &new[2], &new[3], 3973 &new[4], &new[5], &new[6], &new[7]); 3974 if (ret != num_levels) 3975 return -EINVAL; 3976 3977 drm_modeset_lock_all(dev); 3978 3979 for (level = 0; level < num_levels; level++) 3980 wm[level] = new[level]; 3981 3982 drm_modeset_unlock_all(dev); 3983 3984 return len; 3985 } 3986 3987 3988 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 3989 size_t len, loff_t *offp) 3990 { 3991 struct seq_file *m = file->private_data; 3992 struct drm_i915_private *dev_priv = m->private; 3993 uint16_t *latencies; 3994 3995 if (INTEL_GEN(dev_priv) >= 9) 3996 

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
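
/*
 * i915_wedged reports whether the GPU is terminally wedged when read.
 * Writing an engine mask marks those engines as hung and forces error
 * handling, e.g. (mask illustrative, bit 0 being the first engine; path
 * assumes DRM minor 0):
 *
 *	echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */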

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling the same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'.
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, "Manually setting wedged to %llu", val);

	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush the idle worker to disarm the irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");

#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE)
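
/*
 * i915_gem_drop_caches takes a mask of the DROP_* bits above; reading
 * the file returns DROP_ALL. For example, to retire requests and drop
 * everything bound and unbound (mask illustrative, path assumes DRM
 * minor 0):
 *
 *	echo 0x7 > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */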

static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/*
	 * No need to check for and wait on gpu resets; only libdrm
	 * auto-restarts ioctls on -EAGAIN.
	 */
	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(dev_priv,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED);

		if (val & DROP_RETIRE)
			i915_gem_retire_requests(dev_priv);

		mutex_unlock(&dev->struct_mutex);
	}

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(dev_priv);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE)
		drain_delayed_work(&dev_priv->gt.idle_work);

	if (val & DROP_FREED) {
		synchronize_rcu();
		i915_gem_drain_freed_objects(dev_priv);
	}

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		return ret;

	/* Turbo will still be enabled, but won't go above the set value. */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = rps->max_freq;
	hw_min = rps->min_freq;

	if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
		mutex_unlock(&dev_priv->pcu_lock);
		return -EINVAL;
	}

	rps->max_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->pcu_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
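
/*
 * i915_max_freq and i915_min_freq exchange values in MHz;
 * intel_freq_opcode()/intel_gpu_freq() convert between MHz and the
 * hardware's RPS units. Example (value illustrative, path assumes DRM
 * minor 0):
 *
 *	echo 350 > /sys/kernel/debug/dri/0/i915_min_freq
 */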

static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		return ret;

	/* Turbo will still be enabled, but won't go below the set value. */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = rps->max_freq;
	hw_min = rps->min_freq;

	if (val < hw_min ||
	    val > hw_max || val > rps->max_freq_softlimit) {
		mutex_unlock(&dev_priv->pcu_lock);
		return -EINVAL;
	}

	rps->min_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->pcu_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
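
/*
 * i915_cache_sharing accepts a policy value of 0-3 on gen6/gen7 and
 * programs it into the GEN6_MBC_SNPCR_MASK field of GEN6_MBCUNIT_SNPCR;
 * anything above 3 is rejected with -EINVAL.
 */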

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
}

static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
	int s_max = 3, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_GEN9_LP(dev_priv)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
			sseu->subslice_mask =
				INTEL_INFO(dev_priv)->sseu.subslice_mask;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask |= BIT(ss);
			}

			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
}

static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
		sseu->eu_per_subslice =
			INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused-off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
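
/*
 * i915_sseu_status prints the compile-time device info first, then the
 * state sampled from the power-gating ack registers above. Output shape
 * (values illustrative only):
 *
 *	SSEU Device Info
 *	  Available Slice Mask: 0001
 *	  ...
 *	SSEU Device Status
 *	  Enabled Slice Mask: 0001
 *	  ...
 */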
"Available" : "Enabled"; 4541 4542 seq_printf(m, " %s Slice Mask: %04x\n", type, 4543 sseu->slice_mask); 4544 seq_printf(m, " %s Slice Total: %u\n", type, 4545 hweight8(sseu->slice_mask)); 4546 seq_printf(m, " %s Subslice Total: %u\n", type, 4547 sseu_subslice_total(sseu)); 4548 seq_printf(m, " %s Subslice Mask: %04x\n", type, 4549 sseu->subslice_mask); 4550 seq_printf(m, " %s Subslice Per Slice: %u\n", type, 4551 hweight8(sseu->subslice_mask)); 4552 seq_printf(m, " %s EU Total: %u\n", type, 4553 sseu->eu_total); 4554 seq_printf(m, " %s EU Per Subslice: %u\n", type, 4555 sseu->eu_per_subslice); 4556 4557 if (!is_available_info) 4558 return; 4559 4560 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv))); 4561 if (HAS_POOLED_EU(dev_priv)) 4562 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool); 4563 4564 seq_printf(m, " Has Slice Power Gating: %s\n", 4565 yesno(sseu->has_slice_pg)); 4566 seq_printf(m, " Has Subslice Power Gating: %s\n", 4567 yesno(sseu->has_subslice_pg)); 4568 seq_printf(m, " Has EU Power Gating: %s\n", 4569 yesno(sseu->has_eu_pg)); 4570 } 4571 4572 static int i915_sseu_status(struct seq_file *m, void *unused) 4573 { 4574 struct drm_i915_private *dev_priv = node_to_i915(m->private); 4575 struct sseu_dev_info sseu; 4576 4577 if (INTEL_GEN(dev_priv) < 8) 4578 return -ENODEV; 4579 4580 seq_puts(m, "SSEU Device Info\n"); 4581 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu); 4582 4583 seq_puts(m, "SSEU Device Status\n"); 4584 memset(&sseu, 0, sizeof(sseu)); 4585 4586 intel_runtime_pm_get(dev_priv); 4587 4588 if (IS_CHERRYVIEW(dev_priv)) { 4589 cherryview_sseu_device_status(dev_priv, &sseu); 4590 } else if (IS_BROADWELL(dev_priv)) { 4591 broadwell_sseu_device_status(dev_priv, &sseu); 4592 } else if (INTEL_GEN(dev_priv) >= 9) { 4593 gen9_sseu_device_status(dev_priv, &sseu); 4594 } 4595 4596 intel_runtime_pm_put(dev_priv); 4597 4598 i915_print_sseu_info(m, false, &sseu); 4599 4600 return 0; 4601 } 4602 4603 static int i915_forcewake_open(struct inode *inode, struct file *file) 4604 { 4605 struct drm_i915_private *i915 = inode->i_private; 4606 4607 if (INTEL_GEN(i915) < 6) 4608 return 0; 4609 4610 intel_runtime_pm_get(i915); 4611 intel_uncore_forcewake_user_get(i915); 4612 4613 return 0; 4614 } 4615 4616 static int i915_forcewake_release(struct inode *inode, struct file *file) 4617 { 4618 struct drm_i915_private *i915 = inode->i_private; 4619 4620 if (INTEL_GEN(i915) < 6) 4621 return 0; 4622 4623 intel_uncore_forcewake_user_put(i915); 4624 intel_runtime_pm_put(i915); 4625 4626 return 0; 4627 } 4628 4629 static const struct file_operations i915_forcewake_fops = { 4630 .owner = THIS_MODULE, 4631 .open = i915_forcewake_open, 4632 .release = i915_forcewake_release, 4633 }; 4634 4635 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) 4636 { 4637 struct drm_i915_private *dev_priv = m->private; 4638 struct i915_hotplug *hotplug = &dev_priv->hotplug; 4639 4640 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); 4641 seq_printf(m, "Detected: %s\n", 4642 yesno(delayed_work_pending(&hotplug->reenable_work))); 4643 4644 return 0; 4645 } 4646 4647 static ssize_t i915_hpd_storm_ctl_write(struct file *file, 4648 const char __user *ubuf, size_t len, 4649 loff_t *offp) 4650 { 4651 struct seq_file *m = file->private_data; 4652 struct drm_i915_private *dev_priv = m->private; 4653 struct i915_hotplug *hotplug = &dev_priv->hotplug; 4654 unsigned int new_threshold; 4655 int i; 4656 char *newline; 4657 char tmp[16]; 4658 4659 if (len >= 

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_control", &i915_guc_log_control_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops}
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	struct dentry *ent;
	int ret, i;

	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
				  minor->debugfs_root, to_i915(minor->dev),
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	ret = intel_pipe_crc_create(minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ent = debugfs_create_file(i915_debugfs_files[i].name,
					  S_IRUGO | S_IWUSR,
					  minor->debugfs_root,
					  to_i915(minor->dev),
					  i915_debugfs_files[i].fops);
		if (!ent)
			return -ENOMEM;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
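
/*
 * The dpcd_block table below drives the per-connector i915_dpcd file:
 * each entry names a DPCD range to dump, printed as one "offset: bytes"
 * line per block. Example output line (byte contents illustrative only):
 *
 *	0000: 12 14 c4 01 01 01 01 00 02 00 00 00 0e 00 81 01
 */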

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}

static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}

static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}

static int i915_panel_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_panel_show, inode->i_private);
}

static const struct file_operations i915_panel_fops = {
	.owner = THIS_MODULE,
	.open = i915_panel_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);

	return 0;
}