/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
#include "intel_guc_submission.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static __always_inline void seq_print_param(struct seq_file *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	kernel_param_lock(THIS_MODULE);
#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
	I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->userfault_count ?
		'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x-2] = '\0';

		return buf;
	}
}

static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ?
"g" : "pp", 191 vma->node.start, vma->node.size, 192 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0)); 193 if (i915_vma_is_ggtt(vma)) { 194 switch (vma->ggtt_view.type) { 195 case I915_GGTT_VIEW_NORMAL: 196 seq_puts(m, ", normal"); 197 break; 198 199 case I915_GGTT_VIEW_PARTIAL: 200 seq_printf(m, ", partial [%08llx+%x]", 201 vma->ggtt_view.partial.offset << PAGE_SHIFT, 202 vma->ggtt_view.partial.size << PAGE_SHIFT); 203 break; 204 205 case I915_GGTT_VIEW_ROTATED: 206 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]", 207 vma->ggtt_view.rotated.plane[0].width, 208 vma->ggtt_view.rotated.plane[0].height, 209 vma->ggtt_view.rotated.plane[0].stride, 210 vma->ggtt_view.rotated.plane[0].offset, 211 vma->ggtt_view.rotated.plane[1].width, 212 vma->ggtt_view.rotated.plane[1].height, 213 vma->ggtt_view.rotated.plane[1].stride, 214 vma->ggtt_view.rotated.plane[1].offset); 215 break; 216 217 default: 218 MISSING_CASE(vma->ggtt_view.type); 219 break; 220 } 221 } 222 if (vma->fence) 223 seq_printf(m, " , fence: %d%s", 224 vma->fence->id, 225 i915_gem_active_isset(&vma->last_fence) ? "*" : ""); 226 seq_puts(m, ")"); 227 } 228 if (obj->stolen) 229 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 230 231 engine = i915_gem_object_last_write_engine(obj); 232 if (engine) 233 seq_printf(m, " (%s)", engine->name); 234 235 frontbuffer_bits = atomic_read(&obj->frontbuffer_bits); 236 if (frontbuffer_bits) 237 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits); 238 } 239 240 static int obj_rank_by_stolen(const void *A, const void *B) 241 { 242 const struct drm_i915_gem_object *a = 243 *(const struct drm_i915_gem_object **)A; 244 const struct drm_i915_gem_object *b = 245 *(const struct drm_i915_gem_object **)B; 246 247 if (a->stolen->start < b->stolen->start) 248 return -1; 249 if (a->stolen->start > b->stolen->start) 250 return 1; 251 return 0; 252 } 253 254 static int i915_gem_stolen_list_info(struct seq_file *m, void *data) 255 { 256 struct drm_i915_private *dev_priv = node_to_i915(m->private); 257 struct drm_device *dev = &dev_priv->drm; 258 struct drm_i915_gem_object **objects; 259 struct drm_i915_gem_object *obj; 260 u64 total_obj_size, total_gtt_size; 261 unsigned long total, count, n; 262 int ret; 263 264 total = READ_ONCE(dev_priv->mm.object_count); 265 objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL); 266 if (!objects) 267 return -ENOMEM; 268 269 ret = mutex_lock_interruptible(&dev->struct_mutex); 270 if (ret) 271 goto out; 272 273 total_obj_size = total_gtt_size = count = 0; 274 275 spin_lock(&dev_priv->mm.obj_lock); 276 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) { 277 if (count == total) 278 break; 279 280 if (obj->stolen == NULL) 281 continue; 282 283 objects[count++] = obj; 284 total_obj_size += obj->base.size; 285 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 286 287 } 288 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) { 289 if (count == total) 290 break; 291 292 if (obj->stolen == NULL) 293 continue; 294 295 objects[count++] = obj; 296 total_obj_size += obj->base.size; 297 } 298 spin_unlock(&dev_priv->mm.obj_lock); 299 300 sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL); 301 302 seq_puts(m, "Stolen:\n"); 303 for (n = 0; n < count; n++) { 304 seq_puts(m, " "); 305 describe_obj(m, objects[n]); 306 seq_putc(m, '\n'); 307 } 308 seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n", 309 count, total_obj_size, total_gtt_size); 310 311 mutex_unlock(&dev->struct_mutex); 312 
out:
	kvfree(objects);
	return ret;
}

struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}

static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size,
	    huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long nobject, n;
	int count, ret;

	nobject = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		objects[count++] = obj;
		if (count == nobject)
			break;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	total_obj_size = total_gtt_size = 0;
	for (n = 0; n < count; n++) {
		obj = objects[n];

		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	kvfree(objects);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, " ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, intel_engine_get_seqno(engine));

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		i915_ring_seqno_info(m, engine);

	return 0;
}


static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	for_each_engine(engine, dev_priv, id) {
		if (INTEL_GEN(dev_priv) >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;

	intel_runtime_pm_get(i915);
	gpu = i915_capture_gpu_state(i915);
	intel_runtime_pm_put(i915);
	if (!gpu)
		return -ENOMEM;

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_global_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL,
			i915_next_seqno_set,
			"0x%llx\n");

static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup =
		    I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		if (INTEL_GEN(dev_priv) >= 9)
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}

static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}

static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] =
		    intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   engine->timeline->inflight_seqnos);
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}

static int i915_reset_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s = %u\n", engine->name,
			   i915_reset_engine_count(error, engine));
	}

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl
		    & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   i915->uncore.user_forcewake.count);

	for_each_fw_domain(fw_domain, i915, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ?
"Up" : "Down"); 1505 1506 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6); 1507 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6); 1508 1509 return i915_forcewake_domains(m, NULL); 1510 } 1511 1512 static int gen6_drpc_info(struct seq_file *m) 1513 { 1514 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1515 u32 gt_core_status, rcctl1, rc6vids = 0; 1516 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0; 1517 unsigned forcewake_count; 1518 int count = 0; 1519 1520 forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count); 1521 if (forcewake_count) { 1522 seq_puts(m, "RC information inaccurate because somebody " 1523 "holds a forcewake reference \n"); 1524 } else { 1525 /* NB: we cannot use forcewake, else we read the wrong values */ 1526 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1527 udelay(10); 1528 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1529 } 1530 1531 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS); 1532 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1533 1534 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1535 if (INTEL_GEN(dev_priv) >= 9) { 1536 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE); 1537 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS); 1538 } 1539 1540 mutex_lock(&dev_priv->pcu_lock); 1541 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1542 mutex_unlock(&dev_priv->pcu_lock); 1543 1544 seq_printf(m, "RC1e Enabled: %s\n", 1545 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1546 seq_printf(m, "RC6 Enabled: %s\n", 1547 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1548 if (INTEL_GEN(dev_priv) >= 9) { 1549 seq_printf(m, "Render Well Gating Enabled: %s\n", 1550 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE)); 1551 seq_printf(m, "Media Well Gating Enabled: %s\n", 1552 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE)); 1553 } 1554 seq_printf(m, "Deep RC6 Enabled: %s\n", 1555 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1556 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1557 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1558 seq_puts(m, "Current RC state: "); 1559 switch (gt_core_status & GEN6_RCn_MASK) { 1560 case GEN6_RC0: 1561 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1562 seq_puts(m, "Core Power Down\n"); 1563 else 1564 seq_puts(m, "on\n"); 1565 break; 1566 case GEN6_RC3: 1567 seq_puts(m, "RC3\n"); 1568 break; 1569 case GEN6_RC6: 1570 seq_puts(m, "RC6\n"); 1571 break; 1572 case GEN6_RC7: 1573 seq_puts(m, "RC7\n"); 1574 break; 1575 default: 1576 seq_puts(m, "Unknown\n"); 1577 break; 1578 } 1579 1580 seq_printf(m, "Core Power Down: %s\n", 1581 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1582 if (INTEL_GEN(dev_priv) >= 9) { 1583 seq_printf(m, "Render Power Well: %s\n", 1584 (gen9_powergate_status & 1585 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down"); 1586 seq_printf(m, "Media Power Well: %s\n", 1587 (gen9_powergate_status & 1588 GEN9_PWRGT_MEDIA_STATUS_MASK) ? 
"Up" : "Down"); 1589 } 1590 1591 /* Not exactly sure what this is */ 1592 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:", 1593 GEN6_GT_GFX_RC6_LOCKED); 1594 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6); 1595 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p); 1596 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp); 1597 1598 seq_printf(m, "RC6 voltage: %dmV\n", 1599 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1600 seq_printf(m, "RC6+ voltage: %dmV\n", 1601 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1602 seq_printf(m, "RC6++ voltage: %dmV\n", 1603 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1604 return i915_forcewake_domains(m, NULL); 1605 } 1606 1607 static int i915_drpc_info(struct seq_file *m, void *unused) 1608 { 1609 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1610 int err; 1611 1612 intel_runtime_pm_get(dev_priv); 1613 1614 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1615 err = vlv_drpc_info(m); 1616 else if (INTEL_GEN(dev_priv) >= 6) 1617 err = gen6_drpc_info(m); 1618 else 1619 err = ironlake_drpc_info(m); 1620 1621 intel_runtime_pm_put(dev_priv); 1622 1623 return err; 1624 } 1625 1626 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) 1627 { 1628 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1629 1630 seq_printf(m, "FB tracking busy bits: 0x%08x\n", 1631 dev_priv->fb_tracking.busy_bits); 1632 1633 seq_printf(m, "FB tracking flip bits: 0x%08x\n", 1634 dev_priv->fb_tracking.flip_bits); 1635 1636 return 0; 1637 } 1638 1639 static int i915_fbc_status(struct seq_file *m, void *unused) 1640 { 1641 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1642 1643 if (!HAS_FBC(dev_priv)) { 1644 seq_puts(m, "FBC unsupported on this chipset\n"); 1645 return 0; 1646 } 1647 1648 intel_runtime_pm_get(dev_priv); 1649 mutex_lock(&dev_priv->fbc.lock); 1650 1651 if (intel_fbc_is_active(dev_priv)) 1652 seq_puts(m, "FBC enabled\n"); 1653 else 1654 seq_printf(m, "FBC disabled: %s\n", 1655 dev_priv->fbc.no_fbc_reason); 1656 1657 if (intel_fbc_is_active(dev_priv)) { 1658 u32 mask; 1659 1660 if (INTEL_GEN(dev_priv) >= 8) 1661 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK; 1662 else if (INTEL_GEN(dev_priv) >= 7) 1663 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK; 1664 else if (INTEL_GEN(dev_priv) >= 5) 1665 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK; 1666 else if (IS_G4X(dev_priv)) 1667 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK; 1668 else 1669 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING | 1670 FBC_STAT_COMPRESSED); 1671 1672 seq_printf(m, "Compressing: %s\n", yesno(mask)); 1673 } 1674 1675 mutex_unlock(&dev_priv->fbc.lock); 1676 intel_runtime_pm_put(dev_priv); 1677 1678 return 0; 1679 } 1680 1681 static int i915_fbc_false_color_get(void *data, u64 *val) 1682 { 1683 struct drm_i915_private *dev_priv = data; 1684 1685 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 1686 return -ENODEV; 1687 1688 *val = dev_priv->fbc.false_color; 1689 1690 return 0; 1691 } 1692 1693 static int i915_fbc_false_color_set(void *data, u64 val) 1694 { 1695 struct drm_i915_private *dev_priv = data; 1696 u32 reg; 1697 1698 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 1699 return -ENODEV; 1700 1701 mutex_lock(&dev_priv->fbc.lock); 1702 1703 reg = I915_READ(ILK_DPFC_CONTROL); 1704 dev_priv->fbc.false_color = val; 1705 1706 I915_WRITE(ILK_DPFC_CONTROL, val ? 
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_IPS(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_LLC(dev_priv)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		/* Convert GT frequency to 50 HZ units */
min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER; 1825 max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER; 1826 } else { 1827 min_gpu_freq = rps->min_freq_softlimit; 1828 max_gpu_freq = rps->max_freq_softlimit; 1829 } 1830 1831 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1832 1833 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { 1834 ia_freq = gpu_freq; 1835 sandybridge_pcode_read(dev_priv, 1836 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1837 &ia_freq); 1838 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1839 intel_gpu_freq(dev_priv, (gpu_freq * 1840 (IS_GEN9_BC(dev_priv) || 1841 IS_CANNONLAKE(dev_priv) ? 1842 GEN9_FREQ_SCALER : 1))), 1843 ((ia_freq >> 0) & 0xff) * 100, 1844 ((ia_freq >> 8) & 0xff) * 100); 1845 } 1846 1847 mutex_unlock(&dev_priv->pcu_lock); 1848 1849 out: 1850 intel_runtime_pm_put(dev_priv); 1851 return ret; 1852 } 1853 1854 static int i915_opregion(struct seq_file *m, void *unused) 1855 { 1856 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1857 struct drm_device *dev = &dev_priv->drm; 1858 struct intel_opregion *opregion = &dev_priv->opregion; 1859 int ret; 1860 1861 ret = mutex_lock_interruptible(&dev->struct_mutex); 1862 if (ret) 1863 goto out; 1864 1865 if (opregion->header) 1866 seq_write(m, opregion->header, OPREGION_SIZE); 1867 1868 mutex_unlock(&dev->struct_mutex); 1869 1870 out: 1871 return 0; 1872 } 1873 1874 static int i915_vbt(struct seq_file *m, void *unused) 1875 { 1876 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; 1877 1878 if (opregion->vbt) 1879 seq_write(m, opregion->vbt, opregion->vbt_size); 1880 1881 return 0; 1882 } 1883 1884 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1885 { 1886 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1887 struct drm_device *dev = &dev_priv->drm; 1888 struct intel_framebuffer *fbdev_fb = NULL; 1889 struct drm_framebuffer *drm_fb; 1890 int ret; 1891 1892 ret = mutex_lock_interruptible(&dev->struct_mutex); 1893 if (ret) 1894 return ret; 1895 1896 #ifdef CONFIG_DRM_FBDEV_EMULATION 1897 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) { 1898 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb); 1899 1900 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1901 fbdev_fb->base.width, 1902 fbdev_fb->base.height, 1903 fbdev_fb->base.format->depth, 1904 fbdev_fb->base.format->cpp[0] * 8, 1905 fbdev_fb->base.modifier, 1906 drm_framebuffer_read_refcount(&fbdev_fb->base)); 1907 describe_obj(m, fbdev_fb->obj); 1908 seq_putc(m, '\n'); 1909 } 1910 #endif 1911 1912 mutex_lock(&dev->mode_config.fb_lock); 1913 drm_for_each_fb(drm_fb, dev) { 1914 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 1915 if (fb == fbdev_fb) 1916 continue; 1917 1918 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1919 fb->base.width, 1920 fb->base.height, 1921 fb->base.format->depth, 1922 fb->base.format->cpp[0] * 8, 1923 fb->base.modifier, 1924 drm_framebuffer_read_refcount(&fb->base)); 1925 describe_obj(m, fb->obj); 1926 seq_putc(m, '\n'); 1927 } 1928 mutex_unlock(&dev->mode_config.fb_lock); 1929 mutex_unlock(&dev->struct_mutex); 1930 1931 return 0; 1932 } 1933 1934 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) 1935 { 1936 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)", 1937 ring->space, ring->head, ring->tail); 1938 } 1939 1940 static int i915_context_status(struct seq_file *m, void 
*unused) 1941 { 1942 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1943 struct drm_device *dev = &dev_priv->drm; 1944 struct intel_engine_cs *engine; 1945 struct i915_gem_context *ctx; 1946 enum intel_engine_id id; 1947 int ret; 1948 1949 ret = mutex_lock_interruptible(&dev->struct_mutex); 1950 if (ret) 1951 return ret; 1952 1953 list_for_each_entry(ctx, &dev_priv->contexts.list, link) { 1954 seq_printf(m, "HW context %u ", ctx->hw_id); 1955 if (ctx->pid) { 1956 struct task_struct *task; 1957 1958 task = get_pid_task(ctx->pid, PIDTYPE_PID); 1959 if (task) { 1960 seq_printf(m, "(%s [%d]) ", 1961 task->comm, task->pid); 1962 put_task_struct(task); 1963 } 1964 } else if (IS_ERR(ctx->file_priv)) { 1965 seq_puts(m, "(deleted) "); 1966 } else { 1967 seq_puts(m, "(kernel) "); 1968 } 1969 1970 seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 1971 seq_putc(m, '\n'); 1972 1973 for_each_engine(engine, dev_priv, id) { 1974 struct intel_context *ce = &ctx->engine[engine->id]; 1975 1976 seq_printf(m, "%s: ", engine->name); 1977 if (ce->state) 1978 describe_obj(m, ce->state->obj); 1979 if (ce->ring) 1980 describe_ctx_ring(m, ce->ring); 1981 seq_putc(m, '\n'); 1982 } 1983 1984 seq_putc(m, '\n'); 1985 } 1986 1987 mutex_unlock(&dev->struct_mutex); 1988 1989 return 0; 1990 } 1991 1992 static void i915_dump_lrc_obj(struct seq_file *m, 1993 struct i915_gem_context *ctx, 1994 struct intel_engine_cs *engine) 1995 { 1996 struct i915_vma *vma = ctx->engine[engine->id].state; 1997 struct page *page; 1998 int j; 1999 2000 seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id); 2001 2002 if (!vma) { 2003 seq_puts(m, "\tFake context\n"); 2004 return; 2005 } 2006 2007 if (vma->flags & I915_VMA_GLOBAL_BIND) 2008 seq_printf(m, "\tBound in GGTT at 0x%08x\n", 2009 i915_ggtt_offset(vma)); 2010 2011 if (i915_gem_object_pin_pages(vma->obj)) { 2012 seq_puts(m, "\tFailed to get pages for context object\n\n"); 2013 return; 2014 } 2015 2016 page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN); 2017 if (page) { 2018 u32 *reg_state = kmap_atomic(page); 2019 2020 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { 2021 seq_printf(m, 2022 "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n", 2023 j * 4, 2024 reg_state[j], reg_state[j + 1], 2025 reg_state[j + 2], reg_state[j + 3]); 2026 } 2027 kunmap_atomic(reg_state); 2028 } 2029 2030 i915_gem_object_unpin_pages(vma->obj); 2031 seq_putc(m, '\n'); 2032 } 2033 2034 static int i915_dump_lrc(struct seq_file *m, void *unused) 2035 { 2036 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2037 struct drm_device *dev = &dev_priv->drm; 2038 struct intel_engine_cs *engine; 2039 struct i915_gem_context *ctx; 2040 enum intel_engine_id id; 2041 int ret; 2042 2043 if (!i915_modparams.enable_execlists) { 2044 seq_printf(m, "Logical Ring Contexts are disabled\n"); 2045 return 0; 2046 } 2047 2048 ret = mutex_lock_interruptible(&dev->struct_mutex); 2049 if (ret) 2050 return ret; 2051 2052 list_for_each_entry(ctx, &dev_priv->contexts.list, link) 2053 for_each_engine(engine, dev_priv, id) 2054 i915_dump_lrc_obj(m, ctx, engine); 2055 2056 mutex_unlock(&dev->struct_mutex); 2057 2058 return 0; 2059 } 2060 2061 static const char *swizzle_string(unsigned swizzle) 2062 { 2063 switch (swizzle) { 2064 case I915_BIT_6_SWIZZLE_NONE: 2065 return "none"; 2066 case I915_BIT_6_SWIZZLE_9: 2067 return "bit9"; 2068 case I915_BIT_6_SWIZZLE_9_10: 2069 return "bit9/bit10"; 2070 case I915_BIT_6_SWIZZLE_9_11: 2071 return "bit9/bit11"; 2072 case I915_BIT_6_SWIZZLE_9_10_11: 2073 return "bit9/bit10/bit11"; 
2074 case I915_BIT_6_SWIZZLE_9_17: 2075 return "bit9/bit17"; 2076 case I915_BIT_6_SWIZZLE_9_10_17: 2077 return "bit9/bit10/bit17"; 2078 case I915_BIT_6_SWIZZLE_UNKNOWN: 2079 return "unknown"; 2080 } 2081 2082 return "bug"; 2083 } 2084 2085 static int i915_swizzle_info(struct seq_file *m, void *data) 2086 { 2087 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2088 2089 intel_runtime_pm_get(dev_priv); 2090 2091 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 2092 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 2093 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 2094 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 2095 2096 if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) { 2097 seq_printf(m, "DDC = 0x%08x\n", 2098 I915_READ(DCC)); 2099 seq_printf(m, "DDC2 = 0x%08x\n", 2100 I915_READ(DCC2)); 2101 seq_printf(m, "C0DRB3 = 0x%04x\n", 2102 I915_READ16(C0DRB3)); 2103 seq_printf(m, "C1DRB3 = 0x%04x\n", 2104 I915_READ16(C1DRB3)); 2105 } else if (INTEL_GEN(dev_priv) >= 6) { 2106 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 2107 I915_READ(MAD_DIMM_C0)); 2108 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 2109 I915_READ(MAD_DIMM_C1)); 2110 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 2111 I915_READ(MAD_DIMM_C2)); 2112 seq_printf(m, "TILECTL = 0x%08x\n", 2113 I915_READ(TILECTL)); 2114 if (INTEL_GEN(dev_priv) >= 8) 2115 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 2116 I915_READ(GAMTARBMODE)); 2117 else 2118 seq_printf(m, "ARB_MODE = 0x%08x\n", 2119 I915_READ(ARB_MODE)); 2120 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 2121 I915_READ(DISP_ARB_CTL)); 2122 } 2123 2124 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2125 seq_puts(m, "L-shaped memory detected\n"); 2126 2127 intel_runtime_pm_put(dev_priv); 2128 2129 return 0; 2130 } 2131 2132 static int per_file_ctx(int id, void *ptr, void *data) 2133 { 2134 struct i915_gem_context *ctx = ptr; 2135 struct seq_file *m = data; 2136 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2137 2138 if (!ppgtt) { 2139 seq_printf(m, " no ppgtt for context %d\n", 2140 ctx->user_handle); 2141 return 0; 2142 } 2143 2144 if (i915_gem_context_is_default(ctx)) 2145 seq_puts(m, " default context:\n"); 2146 else 2147 seq_printf(m, " context %d:\n", ctx->user_handle); 2148 ppgtt->debug_dump(ppgtt, m); 2149 2150 return 0; 2151 } 2152 2153 static void gen8_ppgtt_info(struct seq_file *m, 2154 struct drm_i915_private *dev_priv) 2155 { 2156 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2157 struct intel_engine_cs *engine; 2158 enum intel_engine_id id; 2159 int i; 2160 2161 if (!ppgtt) 2162 return; 2163 2164 for_each_engine(engine, dev_priv, id) { 2165 seq_printf(m, "%s\n", engine->name); 2166 for (i = 0; i < 4; i++) { 2167 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i)); 2168 pdp <<= 32; 2169 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i)); 2170 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2171 } 2172 } 2173 } 2174 2175 static void gen6_ppgtt_info(struct seq_file *m, 2176 struct drm_i915_private *dev_priv) 2177 { 2178 struct intel_engine_cs *engine; 2179 enum intel_engine_id id; 2180 2181 if (IS_GEN6(dev_priv)) 2182 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2183 2184 for_each_engine(engine, dev_priv, id) { 2185 seq_printf(m, "%s\n", engine->name); 2186 if (IS_GEN7(dev_priv)) 2187 seq_printf(m, "GFX_MODE: 0x%08x\n", 2188 I915_READ(RING_MODE_GEN7(engine))); 2189 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", 2190 I915_READ(RING_PP_DIR_BASE(engine))); 2191 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", 2192 I915_READ(RING_PP_DIR_BASE_READ(engine))); 2193 seq_printf(m, "PP_DIR_DCLV: 
0x%08x\n", 2194 I915_READ(RING_PP_DIR_DCLV(engine))); 2195 } 2196 if (dev_priv->mm.aliasing_ppgtt) { 2197 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2198 2199 seq_puts(m, "aliasing PPGTT:\n"); 2200 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); 2201 2202 ppgtt->debug_dump(ppgtt, m); 2203 } 2204 2205 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2206 } 2207 2208 static int i915_ppgtt_info(struct seq_file *m, void *data) 2209 { 2210 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2211 struct drm_device *dev = &dev_priv->drm; 2212 struct drm_file *file; 2213 int ret; 2214 2215 mutex_lock(&dev->filelist_mutex); 2216 ret = mutex_lock_interruptible(&dev->struct_mutex); 2217 if (ret) 2218 goto out_unlock; 2219 2220 intel_runtime_pm_get(dev_priv); 2221 2222 if (INTEL_GEN(dev_priv) >= 8) 2223 gen8_ppgtt_info(m, dev_priv); 2224 else if (INTEL_GEN(dev_priv) >= 6) 2225 gen6_ppgtt_info(m, dev_priv); 2226 2227 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2228 struct drm_i915_file_private *file_priv = file->driver_priv; 2229 struct task_struct *task; 2230 2231 task = get_pid_task(file->pid, PIDTYPE_PID); 2232 if (!task) { 2233 ret = -ESRCH; 2234 goto out_rpm; 2235 } 2236 seq_printf(m, "\nproc: %s\n", task->comm); 2237 put_task_struct(task); 2238 idr_for_each(&file_priv->context_idr, per_file_ctx, 2239 (void *)(unsigned long)m); 2240 } 2241 2242 out_rpm: 2243 intel_runtime_pm_put(dev_priv); 2244 mutex_unlock(&dev->struct_mutex); 2245 out_unlock: 2246 mutex_unlock(&dev->filelist_mutex); 2247 return ret; 2248 } 2249 2250 static int count_irq_waiters(struct drm_i915_private *i915) 2251 { 2252 struct intel_engine_cs *engine; 2253 enum intel_engine_id id; 2254 int count = 0; 2255 2256 for_each_engine(engine, i915, id) 2257 count += intel_engine_has_waiter(engine); 2258 2259 return count; 2260 } 2261 2262 static const char *rps_power_to_str(unsigned int power) 2263 { 2264 static const char * const strings[] = { 2265 [LOW_POWER] = "low power", 2266 [BETWEEN] = "mixed", 2267 [HIGH_POWER] = "high power", 2268 }; 2269 2270 if (power >= ARRAY_SIZE(strings) || !strings[power]) 2271 return "unknown"; 2272 2273 return strings[power]; 2274 } 2275 2276 static int i915_rps_boost_info(struct seq_file *m, void *data) 2277 { 2278 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2279 struct drm_device *dev = &dev_priv->drm; 2280 struct intel_rps *rps = &dev_priv->gt_pm.rps; 2281 struct drm_file *file; 2282 2283 seq_printf(m, "RPS enabled? %d\n", rps->enabled); 2284 seq_printf(m, "GPU busy? %s [%d requests]\n", 2285 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests); 2286 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); 2287 seq_printf(m, "Boosts outstanding? 
%d\n", 2288 atomic_read(&rps->num_waiters)); 2289 seq_printf(m, "Frequency requested %d\n", 2290 intel_gpu_freq(dev_priv, rps->cur_freq)); 2291 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2292 intel_gpu_freq(dev_priv, rps->min_freq), 2293 intel_gpu_freq(dev_priv, rps->min_freq_softlimit), 2294 intel_gpu_freq(dev_priv, rps->max_freq_softlimit), 2295 intel_gpu_freq(dev_priv, rps->max_freq)); 2296 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", 2297 intel_gpu_freq(dev_priv, rps->idle_freq), 2298 intel_gpu_freq(dev_priv, rps->efficient_freq), 2299 intel_gpu_freq(dev_priv, rps->boost_freq)); 2300 2301 mutex_lock(&dev->filelist_mutex); 2302 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2303 struct drm_i915_file_private *file_priv = file->driver_priv; 2304 struct task_struct *task; 2305 2306 rcu_read_lock(); 2307 task = pid_task(file->pid, PIDTYPE_PID); 2308 seq_printf(m, "%s [%d]: %d boosts\n", 2309 task ? task->comm : "<unknown>", 2310 task ? task->pid : -1, 2311 atomic_read(&file_priv->rps_client.boosts)); 2312 rcu_read_unlock(); 2313 } 2314 seq_printf(m, "Kernel (anonymous) boosts: %d\n", 2315 atomic_read(&rps->boosts)); 2316 mutex_unlock(&dev->filelist_mutex); 2317 2318 if (INTEL_GEN(dev_priv) >= 6 && 2319 rps->enabled && 2320 dev_priv->gt.active_requests) { 2321 u32 rpup, rpupei; 2322 u32 rpdown, rpdownei; 2323 2324 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2325 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; 2326 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; 2327 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; 2328 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; 2329 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2330 2331 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", 2332 rps_power_to_str(rps->power)); 2333 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", 2334 rpup && rpupei ? 100 * rpup / rpupei : 0, 2335 rps->up_threshold); 2336 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", 2337 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0, 2338 rps->down_threshold); 2339 } else { 2340 seq_puts(m, "\nRPS Autotuning inactive\n"); 2341 } 2342 2343 return 0; 2344 } 2345 2346 static int i915_llc(struct seq_file *m, void *data) 2347 { 2348 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2349 const bool edram = INTEL_GEN(dev_priv) > 8; 2350 2351 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv))); 2352 seq_printf(m, "%s: %lluMB\n", edram ? 
"eDRAM" : "eLLC", 2353 intel_uncore_edram_size(dev_priv)/1024/1024); 2354 2355 return 0; 2356 } 2357 2358 static int i915_huc_load_status_info(struct seq_file *m, void *data) 2359 { 2360 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2361 struct drm_printer p; 2362 2363 if (!HAS_HUC_UCODE(dev_priv)) 2364 return 0; 2365 2366 p = drm_seq_file_printer(m); 2367 intel_uc_fw_dump(&dev_priv->huc.fw, &p); 2368 2369 intel_runtime_pm_get(dev_priv); 2370 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); 2371 intel_runtime_pm_put(dev_priv); 2372 2373 return 0; 2374 } 2375 2376 static int i915_guc_load_status_info(struct seq_file *m, void *data) 2377 { 2378 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2379 struct drm_printer p; 2380 u32 tmp, i; 2381 2382 if (!HAS_GUC_UCODE(dev_priv)) 2383 return 0; 2384 2385 p = drm_seq_file_printer(m); 2386 intel_uc_fw_dump(&dev_priv->guc.fw, &p); 2387 2388 intel_runtime_pm_get(dev_priv); 2389 2390 tmp = I915_READ(GUC_STATUS); 2391 2392 seq_printf(m, "\nGuC status 0x%08x:\n", tmp); 2393 seq_printf(m, "\tBootrom status = 0x%x\n", 2394 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); 2395 seq_printf(m, "\tuKernel status = 0x%x\n", 2396 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); 2397 seq_printf(m, "\tMIA Core status = 0x%x\n", 2398 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); 2399 seq_puts(m, "\nScratch registers:\n"); 2400 for (i = 0; i < 16; i++) 2401 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); 2402 2403 intel_runtime_pm_put(dev_priv); 2404 2405 return 0; 2406 } 2407 2408 static void i915_guc_log_info(struct seq_file *m, 2409 struct drm_i915_private *dev_priv) 2410 { 2411 struct intel_guc *guc = &dev_priv->guc; 2412 2413 seq_puts(m, "\nGuC logging stats:\n"); 2414 2415 seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n", 2416 guc->log.flush_count[GUC_ISR_LOG_BUFFER], 2417 guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]); 2418 2419 seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n", 2420 guc->log.flush_count[GUC_DPC_LOG_BUFFER], 2421 guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]); 2422 2423 seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n", 2424 guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER], 2425 guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]); 2426 2427 seq_printf(m, "\tTotal flush interrupt count: %u\n", 2428 guc->log.flush_interrupt_count); 2429 2430 seq_printf(m, "\tCapture miss count: %u\n", 2431 guc->log.capture_miss_count); 2432 } 2433 2434 static void i915_guc_client_info(struct seq_file *m, 2435 struct drm_i915_private *dev_priv, 2436 struct intel_guc_client *client) 2437 { 2438 struct intel_engine_cs *engine; 2439 enum intel_engine_id id; 2440 uint64_t tot = 0; 2441 2442 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", 2443 client->priority, client->stage_id, client->proc_desc_offset); 2444 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n", 2445 client->doorbell_id, client->doorbell_offset); 2446 2447 for_each_engine(engine, dev_priv, id) { 2448 u64 submissions = client->submissions[id]; 2449 tot += submissions; 2450 seq_printf(m, "\tSubmissions: %llu %s\n", 2451 submissions, engine->name); 2452 } 2453 seq_printf(m, "\tTotal: %llu\n", tot); 2454 } 2455 2456 static bool check_guc_submission(struct seq_file *m) 2457 { 2458 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2459 const struct intel_guc *guc = &dev_priv->guc; 2460 2461 if (!guc->execbuf_client) { 2462 seq_printf(m, "GuC submission %s\n", 2463 
HAS_GUC_SCHED(dev_priv) ? 2464 "disabled" : 2465 "not supported"); 2466 return false; 2467 } 2468 2469 return true; 2470 } 2471 2472 static int i915_guc_info(struct seq_file *m, void *data) 2473 { 2474 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2475 const struct intel_guc *guc = &dev_priv->guc; 2476 2477 if (!check_guc_submission(m)) 2478 return 0; 2479 2480 seq_printf(m, "Doorbell map:\n"); 2481 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap); 2482 seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline); 2483 2484 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client); 2485 i915_guc_client_info(m, dev_priv, guc->execbuf_client); 2486 seq_printf(m, "\nGuC preempt client @ %p:\n", guc->preempt_client); 2487 i915_guc_client_info(m, dev_priv, guc->preempt_client); 2488 2489 i915_guc_log_info(m, dev_priv); 2490 2491 /* Add more as required ... */ 2492 2493 return 0; 2494 } 2495 2496 static int i915_guc_stage_pool(struct seq_file *m, void *data) 2497 { 2498 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2499 const struct intel_guc *guc = &dev_priv->guc; 2500 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr; 2501 struct intel_guc_client *client = guc->execbuf_client; 2502 unsigned int tmp; 2503 int index; 2504 2505 if (!check_guc_submission(m)) 2506 return 0; 2507 2508 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) { 2509 struct intel_engine_cs *engine; 2510 2511 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE)) 2512 continue; 2513 2514 seq_printf(m, "GuC stage descriptor %u:\n", index); 2515 seq_printf(m, "\tIndex: %u\n", desc->stage_id); 2516 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute); 2517 seq_printf(m, "\tPriority: %d\n", desc->priority); 2518 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id); 2519 seq_printf(m, "\tEngines used: 0x%x\n", 2520 desc->engines_used); 2521 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n", 2522 desc->db_trigger_phy, 2523 desc->db_trigger_cpu, 2524 desc->db_trigger_uk); 2525 seq_printf(m, "\tProcess descriptor: 0x%x\n", 2526 desc->process_desc); 2527 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n", 2528 desc->wq_addr, desc->wq_size); 2529 seq_putc(m, '\n'); 2530 2531 for_each_engine_masked(engine, dev_priv, client->engines, tmp) { 2532 u32 guc_engine_id = engine->guc_id; 2533 struct guc_execlist_context *lrc = 2534 &desc->lrc[guc_engine_id]; 2535 2536 seq_printf(m, "\t%s LRC:\n", engine->name); 2537 seq_printf(m, "\t\tContext desc: 0x%x\n", 2538 lrc->context_desc); 2539 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id); 2540 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca); 2541 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin); 2542 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end); 2543 seq_putc(m, '\n'); 2544 } 2545 } 2546 2547 return 0; 2548 } 2549 2550 static int i915_guc_log_dump(struct seq_file *m, void *data) 2551 { 2552 struct drm_info_node *node = m->private; 2553 struct drm_i915_private *dev_priv = node_to_i915(node); 2554 bool dump_load_err = !!node->info_ent->data; 2555 struct drm_i915_gem_object *obj = NULL; 2556 u32 *log; 2557 int i = 0; 2558 2559 if (dump_load_err) 2560 obj = dev_priv->guc.load_err_log; 2561 else if (dev_priv->guc.log.vma) 2562 obj = dev_priv->guc.log.vma->obj; 2563 2564 if (!obj) 2565 return 0; 2566 2567 log = i915_gem_object_pin_map(obj, I915_MAP_WC); 2568 if (IS_ERR(log)) { 2569 DRM_DEBUG("Failed to pin object\n"); 2570 seq_puts(m, "(log data unaccessible)\n"); 
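/* Mapping the log object failed, so there is nothing to dump; propagate the error. */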
2571 return PTR_ERR(log); 2572 } 2573 2574 for (i = 0; i < obj->base.size / sizeof(u32); i += 4) 2575 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n", 2576 *(log + i), *(log + i + 1), 2577 *(log + i + 2), *(log + i + 3)); 2578 2579 seq_putc(m, '\n'); 2580 2581 i915_gem_object_unpin_map(obj); 2582 2583 return 0; 2584 } 2585 2586 static int i915_guc_log_control_get(void *data, u64 *val) 2587 { 2588 struct drm_i915_private *dev_priv = data; 2589 2590 if (!dev_priv->guc.log.vma) 2591 return -EINVAL; 2592 2593 *val = i915_modparams.guc_log_level; 2594 2595 return 0; 2596 } 2597 2598 static int i915_guc_log_control_set(void *data, u64 val) 2599 { 2600 struct drm_i915_private *dev_priv = data; 2601 int ret; 2602 2603 if (!dev_priv->guc.log.vma) 2604 return -EINVAL; 2605 2606 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); 2607 if (ret) 2608 return ret; 2609 2610 intel_runtime_pm_get(dev_priv); 2611 ret = i915_guc_log_control(dev_priv, val); 2612 intel_runtime_pm_put(dev_priv); 2613 2614 mutex_unlock(&dev_priv->drm.struct_mutex); 2615 return ret; 2616 } 2617 2618 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops, 2619 i915_guc_log_control_get, i915_guc_log_control_set, 2620 "%lld\n"); 2621 2622 static const char *psr2_live_status(u32 val) 2623 { 2624 static const char * const live_status[] = { 2625 "IDLE", 2626 "CAPTURE", 2627 "CAPTURE_FS", 2628 "SLEEP", 2629 "BUFON_FW", 2630 "ML_UP", 2631 "SU_STANDBY", 2632 "FAST_SLEEP", 2633 "DEEP_SLEEP", 2634 "BUF_ON", 2635 "TG_ON" 2636 }; 2637 2638 val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT; 2639 if (val < ARRAY_SIZE(live_status)) 2640 return live_status[val]; 2641 2642 return "unknown"; 2643 } 2644 2645 static int i915_edp_psr_status(struct seq_file *m, void *data) 2646 { 2647 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2648 u32 psrperf = 0; 2649 u32 stat[3]; 2650 enum pipe pipe; 2651 bool enabled = false; 2652 2653 if (!HAS_PSR(dev_priv)) { 2654 seq_puts(m, "PSR not supported\n"); 2655 return 0; 2656 } 2657 2658 intel_runtime_pm_get(dev_priv); 2659 2660 mutex_lock(&dev_priv->psr.lock); 2661 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 2662 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 2663 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2664 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2665 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2666 dev_priv->psr.busy_frontbuffer_bits); 2667 seq_printf(m, "Re-enable work scheduled: %s\n", 2668 yesno(work_busy(&dev_priv->psr.work.work))); 2669 2670 if (HAS_DDI(dev_priv)) { 2671 if (dev_priv->psr.psr2_support) 2672 enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; 2673 else 2674 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 2675 } else { 2676 for_each_pipe(dev_priv, pipe) { 2677 enum transcoder cpu_transcoder = 2678 intel_pipe_to_cpu_transcoder(dev_priv, pipe); 2679 enum intel_display_power_domain power_domain; 2680 2681 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 2682 if (!intel_display_power_get_if_enabled(dev_priv, 2683 power_domain)) 2684 continue; 2685 2686 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2687 VLV_EDP_PSR_CURR_STATE_MASK; 2688 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2689 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2690 enabled = true; 2691 2692 intel_display_power_put(dev_priv, power_domain); 2693 } 2694 } 2695 2696 seq_printf(m, "Main link in standby mode: %s\n", 2697 yesno(dev_priv->psr.link_standby)); 2698 2699 seq_printf(m, 
"HW Enabled & Active bit: %s", yesno(enabled)); 2700 2701 if (!HAS_DDI(dev_priv)) 2702 for_each_pipe(dev_priv, pipe) { 2703 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2704 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2705 seq_printf(m, " pipe %c", pipe_name(pipe)); 2706 } 2707 seq_puts(m, "\n"); 2708 2709 /* 2710 * VLV/CHV PSR has no kind of performance counter 2711 * SKL+ Perf counter is reset to 0 everytime DC state is entered 2712 */ 2713 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2714 psrperf = I915_READ(EDP_PSR_PERF_CNT) & 2715 EDP_PSR_PERF_CNT_MASK; 2716 2717 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2718 } 2719 if (dev_priv->psr.psr2_support) { 2720 u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL); 2721 2722 seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n", 2723 psr2, psr2_live_status(psr2)); 2724 } 2725 mutex_unlock(&dev_priv->psr.lock); 2726 2727 intel_runtime_pm_put(dev_priv); 2728 return 0; 2729 } 2730 2731 static int i915_sink_crc(struct seq_file *m, void *data) 2732 { 2733 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2734 struct drm_device *dev = &dev_priv->drm; 2735 struct intel_connector *connector; 2736 struct drm_connector_list_iter conn_iter; 2737 struct intel_dp *intel_dp = NULL; 2738 struct drm_modeset_acquire_ctx ctx; 2739 int ret; 2740 u8 crc[6]; 2741 2742 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 2743 2744 drm_connector_list_iter_begin(dev, &conn_iter); 2745 2746 for_each_intel_connector_iter(connector, &conn_iter) { 2747 struct drm_crtc *crtc; 2748 struct drm_connector_state *state; 2749 struct intel_crtc_state *crtc_state; 2750 2751 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) 2752 continue; 2753 2754 retry: 2755 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); 2756 if (ret) 2757 goto err; 2758 2759 state = connector->base.state; 2760 if (!state->best_encoder) 2761 continue; 2762 2763 crtc = state->crtc; 2764 ret = drm_modeset_lock(&crtc->mutex, &ctx); 2765 if (ret) 2766 goto err; 2767 2768 crtc_state = to_intel_crtc_state(crtc->state); 2769 if (!crtc_state->base.active) 2770 continue; 2771 2772 /* 2773 * We need to wait for all crtc updates to complete, to make 2774 * sure any pending modesets and plane updates are completed. 
2775 */ 2776 if (crtc_state->base.commit) { 2777 ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done); 2778 2779 if (ret) 2780 goto err; 2781 } 2782 2783 intel_dp = enc_to_intel_dp(state->best_encoder); 2784 2785 ret = intel_dp_sink_crc(intel_dp, crtc_state, crc); 2786 if (ret) 2787 goto err; 2788 2789 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2790 crc[0], crc[1], crc[2], 2791 crc[3], crc[4], crc[5]); 2792 goto out; 2793 2794 err: 2795 if (ret == -EDEADLK) { 2796 ret = drm_modeset_backoff(&ctx); 2797 if (!ret) 2798 goto retry; 2799 } 2800 goto out; 2801 } 2802 ret = -ENODEV; 2803 out: 2804 drm_connector_list_iter_end(&conn_iter); 2805 drm_modeset_drop_locks(&ctx); 2806 drm_modeset_acquire_fini(&ctx); 2807 2808 return ret; 2809 } 2810 2811 static int i915_energy_uJ(struct seq_file *m, void *data) 2812 { 2813 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2814 unsigned long long power; 2815 u32 units; 2816 2817 if (INTEL_GEN(dev_priv) < 6) 2818 return -ENODEV; 2819 2820 intel_runtime_pm_get(dev_priv); 2821 2822 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) { 2823 intel_runtime_pm_put(dev_priv); 2824 return -ENODEV; 2825 } 2826 2827 units = (power & 0x1f00) >> 8; 2828 power = I915_READ(MCH_SECP_NRG_STTS); 2829 power = (1000000 * power) >> units; /* convert to uJ */ 2830 2831 intel_runtime_pm_put(dev_priv); 2832 2833 seq_printf(m, "%llu", power); 2834 2835 return 0; 2836 } 2837 2838 static int i915_runtime_pm_status(struct seq_file *m, void *unused) 2839 { 2840 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2841 struct pci_dev *pdev = dev_priv->drm.pdev; 2842 2843 if (!HAS_RUNTIME_PM(dev_priv)) 2844 seq_puts(m, "Runtime power management not supported\n"); 2845 2846 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); 2847 seq_printf(m, "IRQs disabled: %s\n", 2848 yesno(!intel_irqs_enabled(dev_priv))); 2849 #ifdef CONFIG_PM 2850 seq_printf(m, "Usage count: %d\n", 2851 atomic_read(&dev_priv->drm.dev->power.usage_count)); 2852 #else 2853 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); 2854 #endif 2855 seq_printf(m, "PCI device power state: %s [%d]\n", 2856 pci_power_name(pdev->current_state), 2857 pdev->current_state); 2858 2859 return 0; 2860 } 2861 2862 static int i915_power_domain_info(struct seq_file *m, void *unused) 2863 { 2864 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2865 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2866 int i; 2867 2868 mutex_lock(&power_domains->lock); 2869 2870 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2871 for (i = 0; i < power_domains->power_well_count; i++) { 2872 struct i915_power_well *power_well; 2873 enum intel_display_power_domain power_domain; 2874 2875 power_well = &power_domains->power_wells[i]; 2876 seq_printf(m, "%-25s %d\n", power_well->name, 2877 power_well->count); 2878 2879 for_each_power_domain(power_domain, power_well->domains) 2880 seq_printf(m, " %-23s %d\n", 2881 intel_display_power_domain_str(power_domain), 2882 power_domains->domain_use_count[power_domain]); 2883 } 2884 2885 mutex_unlock(&power_domains->lock); 2886 2887 return 0; 2888 } 2889 2890 static int i915_dmc_info(struct seq_file *m, void *unused) 2891 { 2892 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2893 struct intel_csr *csr; 2894 2895 if (!HAS_CSR(dev_priv)) { 2896 seq_puts(m, "not supported\n"); 2897 return 0; 2898 } 2899 2900 csr = &dev_priv->csr; 2901 2902 intel_runtime_pm_get(dev_priv); 2903 2904 seq_printf(m, "fw 
loaded: %s\n", yesno(csr->dmc_payload != NULL)); 2905 seq_printf(m, "path: %s\n", csr->fw_path); 2906 2907 if (!csr->dmc_payload) 2908 goto out; 2909 2910 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 2911 CSR_VERSION_MINOR(csr->version)); 2912 2913 if (IS_KABYLAKE(dev_priv) || 2914 (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) { 2915 seq_printf(m, "DC3 -> DC5 count: %d\n", 2916 I915_READ(SKL_CSR_DC3_DC5_COUNT)); 2917 seq_printf(m, "DC5 -> DC6 count: %d\n", 2918 I915_READ(SKL_CSR_DC5_DC6_COUNT)); 2919 } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) { 2920 seq_printf(m, "DC3 -> DC5 count: %d\n", 2921 I915_READ(BXT_CSR_DC3_DC5_COUNT)); 2922 } 2923 2924 out: 2925 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); 2926 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); 2927 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); 2928 2929 intel_runtime_pm_put(dev_priv); 2930 2931 return 0; 2932 } 2933 2934 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2935 struct drm_display_mode *mode) 2936 { 2937 int i; 2938 2939 for (i = 0; i < tabs; i++) 2940 seq_putc(m, '\t'); 2941 2942 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2943 mode->base.id, mode->name, 2944 mode->vrefresh, mode->clock, 2945 mode->hdisplay, mode->hsync_start, 2946 mode->hsync_end, mode->htotal, 2947 mode->vdisplay, mode->vsync_start, 2948 mode->vsync_end, mode->vtotal, 2949 mode->type, mode->flags); 2950 } 2951 2952 static void intel_encoder_info(struct seq_file *m, 2953 struct intel_crtc *intel_crtc, 2954 struct intel_encoder *intel_encoder) 2955 { 2956 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2957 struct drm_device *dev = &dev_priv->drm; 2958 struct drm_crtc *crtc = &intel_crtc->base; 2959 struct intel_connector *intel_connector; 2960 struct drm_encoder *encoder; 2961 2962 encoder = &intel_encoder->base; 2963 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2964 encoder->base.id, encoder->name); 2965 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2966 struct drm_connector *connector = &intel_connector->base; 2967 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2968 connector->base.id, 2969 connector->name, 2970 drm_get_connector_status_name(connector->status)); 2971 if (connector->status == connector_status_connected) { 2972 struct drm_display_mode *mode = &crtc->mode; 2973 seq_printf(m, ", mode:\n"); 2974 intel_seq_print_mode(m, 2, mode); 2975 } else { 2976 seq_putc(m, '\n'); 2977 } 2978 } 2979 } 2980 2981 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2982 { 2983 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2984 struct drm_device *dev = &dev_priv->drm; 2985 struct drm_crtc *crtc = &intel_crtc->base; 2986 struct intel_encoder *intel_encoder; 2987 struct drm_plane_state *plane_state = crtc->primary->state; 2988 struct drm_framebuffer *fb = plane_state->fb; 2989 2990 if (fb) 2991 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2992 fb->base.id, plane_state->src_x >> 16, 2993 plane_state->src_y >> 16, fb->width, fb->height); 2994 else 2995 seq_puts(m, "\tprimary plane disabled\n"); 2996 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2997 intel_encoder_info(m, intel_crtc, intel_encoder); 2998 } 2999 3000 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 3001 { 3002 struct drm_display_mode *mode = 
panel->fixed_mode; 3003 3004 seq_printf(m, "\tfixed mode:\n"); 3005 intel_seq_print_mode(m, 2, mode); 3006 } 3007 3008 static void intel_dp_info(struct seq_file *m, 3009 struct intel_connector *intel_connector) 3010 { 3011 struct intel_encoder *intel_encoder = intel_connector->encoder; 3012 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 3013 3014 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 3015 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 3016 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) 3017 intel_panel_info(m, &intel_connector->panel); 3018 3019 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, 3020 &intel_dp->aux); 3021 } 3022 3023 static void intel_dp_mst_info(struct seq_file *m, 3024 struct intel_connector *intel_connector) 3025 { 3026 struct intel_encoder *intel_encoder = intel_connector->encoder; 3027 struct intel_dp_mst_encoder *intel_mst = 3028 enc_to_mst(&intel_encoder->base); 3029 struct intel_digital_port *intel_dig_port = intel_mst->primary; 3030 struct intel_dp *intel_dp = &intel_dig_port->dp; 3031 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, 3032 intel_connector->port); 3033 3034 seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); 3035 } 3036 3037 static void intel_hdmi_info(struct seq_file *m, 3038 struct intel_connector *intel_connector) 3039 { 3040 struct intel_encoder *intel_encoder = intel_connector->encoder; 3041 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 3042 3043 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); 3044 } 3045 3046 static void intel_lvds_info(struct seq_file *m, 3047 struct intel_connector *intel_connector) 3048 { 3049 intel_panel_info(m, &intel_connector->panel); 3050 } 3051 3052 static void intel_connector_info(struct seq_file *m, 3053 struct drm_connector *connector) 3054 { 3055 struct intel_connector *intel_connector = to_intel_connector(connector); 3056 struct intel_encoder *intel_encoder = intel_connector->encoder; 3057 struct drm_display_mode *mode; 3058 3059 seq_printf(m, "connector %d: type %s, status: %s\n", 3060 connector->base.id, connector->name, 3061 drm_get_connector_status_name(connector->status)); 3062 if (connector->status == connector_status_connected) { 3063 seq_printf(m, "\tname: %s\n", connector->display_info.name); 3064 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 3065 connector->display_info.width_mm, 3066 connector->display_info.height_mm); 3067 seq_printf(m, "\tsubpixel order: %s\n", 3068 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 3069 seq_printf(m, "\tCEA rev: %d\n", 3070 connector->display_info.cea_rev); 3071 } 3072 3073 if (!intel_encoder) 3074 return; 3075 3076 switch (connector->connector_type) { 3077 case DRM_MODE_CONNECTOR_DisplayPort: 3078 case DRM_MODE_CONNECTOR_eDP: 3079 if (intel_encoder->type == INTEL_OUTPUT_DP_MST) 3080 intel_dp_mst_info(m, intel_connector); 3081 else 3082 intel_dp_info(m, intel_connector); 3083 break; 3084 case DRM_MODE_CONNECTOR_LVDS: 3085 if (intel_encoder->type == INTEL_OUTPUT_LVDS) 3086 intel_lvds_info(m, intel_connector); 3087 break; 3088 case DRM_MODE_CONNECTOR_HDMIA: 3089 if (intel_encoder->type == INTEL_OUTPUT_HDMI || 3090 intel_encoder->type == INTEL_OUTPUT_DDI) 3091 intel_hdmi_info(m, intel_connector); 3092 break; 3093 default: 3094 break; 3095 } 3096 3097 seq_printf(m, "\tmodes:\n"); 3098 list_for_each_entry(mode, &connector->modes, head) 3099 intel_seq_print_mode(m, 2, mode); 
3100 } 3101 3102 static const char *plane_type(enum drm_plane_type type) 3103 { 3104 switch (type) { 3105 case DRM_PLANE_TYPE_OVERLAY: 3106 return "OVL"; 3107 case DRM_PLANE_TYPE_PRIMARY: 3108 return "PRI"; 3109 case DRM_PLANE_TYPE_CURSOR: 3110 return "CUR"; 3111 /* 3112 * Deliberately omitting default: to generate compiler warnings 3113 * when a new drm_plane_type gets added. 3114 */ 3115 } 3116 3117 return "unknown"; 3118 } 3119 3120 static const char *plane_rotation(unsigned int rotation) 3121 { 3122 static char buf[48]; 3123 /* 3124 * According to doc only one DRM_MODE_ROTATE_ is allowed but this 3125 * will print them all to visualize if the values are misused 3126 */ 3127 snprintf(buf, sizeof(buf), 3128 "%s%s%s%s%s%s(0x%08x)", 3129 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "", 3130 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "", 3131 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "", 3132 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "", 3133 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "", 3134 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "", 3135 rotation); 3136 3137 return buf; 3138 } 3139 3140 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3141 { 3142 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3143 struct drm_device *dev = &dev_priv->drm; 3144 struct intel_plane *intel_plane; 3145 3146 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3147 struct drm_plane_state *state; 3148 struct drm_plane *plane = &intel_plane->base; 3149 struct drm_format_name_buf format_name; 3150 3151 if (!plane->state) { 3152 seq_puts(m, "plane->state is NULL!\n"); 3153 continue; 3154 } 3155 3156 state = plane->state; 3157 3158 if (state->fb) { 3159 drm_get_format_name(state->fb->format->format, 3160 &format_name); 3161 } else { 3162 sprintf(format_name.str, "N/A"); 3163 } 3164 3165 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", 3166 plane->base.id, 3167 plane_type(intel_plane->base.type), 3168 state->crtc_x, state->crtc_y, 3169 state->crtc_w, state->crtc_h, 3170 (state->src_x >> 16), 3171 ((state->src_x & 0xffff) * 15625) >> 10, 3172 (state->src_y >> 16), 3173 ((state->src_y & 0xffff) * 15625) >> 10, 3174 (state->src_w >> 16), 3175 ((state->src_w & 0xffff) * 15625) >> 10, 3176 (state->src_h >> 16), 3177 ((state->src_h & 0xffff) * 15625) >> 10, 3178 format_name.str, 3179 plane_rotation(state->rotation)); 3180 } 3181 } 3182 3183 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3184 { 3185 struct intel_crtc_state *pipe_config; 3186 int num_scalers = intel_crtc->num_scalers; 3187 int i; 3188 3189 pipe_config = to_intel_crtc_state(intel_crtc->base.state); 3190 3191 /* Not all platformas have a scaler */ 3192 if (num_scalers) { 3193 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 3194 num_scalers, 3195 pipe_config->scaler_state.scaler_users, 3196 pipe_config->scaler_state.scaler_id); 3197 3198 for (i = 0; i < num_scalers; i++) { 3199 struct intel_scaler *sc = 3200 &pipe_config->scaler_state.scalers[i]; 3201 3202 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 3203 i, yesno(sc->in_use), sc->mode); 3204 } 3205 seq_puts(m, "\n"); 3206 } else { 3207 seq_puts(m, "\tNo scalers available on this platform\n"); 3208 } 3209 } 3210 3211 static int i915_display_info(struct seq_file *m, void *unused) 3212 { 3213 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3214 struct drm_device *dev = 
&dev_priv->drm; 3215 struct intel_crtc *crtc; 3216 struct drm_connector *connector; 3217 struct drm_connector_list_iter conn_iter; 3218 3219 intel_runtime_pm_get(dev_priv); 3220 seq_printf(m, "CRTC info\n"); 3221 seq_printf(m, "---------\n"); 3222 for_each_intel_crtc(dev, crtc) { 3223 struct intel_crtc_state *pipe_config; 3224 3225 drm_modeset_lock(&crtc->base.mutex, NULL); 3226 pipe_config = to_intel_crtc_state(crtc->base.state); 3227 3228 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n", 3229 crtc->base.base.id, pipe_name(crtc->pipe), 3230 yesno(pipe_config->base.active), 3231 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 3232 yesno(pipe_config->dither), pipe_config->pipe_bpp); 3233 3234 if (pipe_config->base.active) { 3235 struct intel_plane *cursor = 3236 to_intel_plane(crtc->base.cursor); 3237 3238 intel_crtc_info(m, crtc); 3239 3240 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n", 3241 yesno(cursor->base.state->visible), 3242 cursor->base.state->crtc_x, 3243 cursor->base.state->crtc_y, 3244 cursor->base.state->crtc_w, 3245 cursor->base.state->crtc_h, 3246 cursor->cursor.base); 3247 intel_scaler_info(m, crtc); 3248 intel_plane_info(m, crtc); 3249 } 3250 3251 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 3252 yesno(!crtc->cpu_fifo_underrun_disabled), 3253 yesno(!crtc->pch_fifo_underrun_disabled)); 3254 drm_modeset_unlock(&crtc->base.mutex); 3255 } 3256 3257 seq_printf(m, "\n"); 3258 seq_printf(m, "Connector info\n"); 3259 seq_printf(m, "--------------\n"); 3260 mutex_lock(&dev->mode_config.mutex); 3261 drm_connector_list_iter_begin(dev, &conn_iter); 3262 drm_for_each_connector_iter(connector, &conn_iter) 3263 intel_connector_info(m, connector); 3264 drm_connector_list_iter_end(&conn_iter); 3265 mutex_unlock(&dev->mode_config.mutex); 3266 3267 intel_runtime_pm_put(dev_priv); 3268 3269 return 0; 3270 } 3271 3272 static int i915_engine_info(struct seq_file *m, void *unused) 3273 { 3274 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3275 struct intel_engine_cs *engine; 3276 enum intel_engine_id id; 3277 struct drm_printer p; 3278 3279 intel_runtime_pm_get(dev_priv); 3280 3281 seq_printf(m, "GT awake? 
%s\n", 3282 yesno(dev_priv->gt.awake)); 3283 seq_printf(m, "Global active requests: %d\n", 3284 dev_priv->gt.active_requests); 3285 seq_printf(m, "CS timestamp frequency: %u kHz\n", 3286 dev_priv->info.cs_timestamp_frequency_khz); 3287 3288 p = drm_seq_file_printer(m); 3289 for_each_engine(engine, dev_priv, id) 3290 intel_engine_dump(engine, &p); 3291 3292 intel_runtime_pm_put(dev_priv); 3293 3294 return 0; 3295 } 3296 3297 static int i915_shrinker_info(struct seq_file *m, void *unused) 3298 { 3299 struct drm_i915_private *i915 = node_to_i915(m->private); 3300 3301 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks); 3302 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch); 3303 3304 return 0; 3305 } 3306 3307 static int i915_semaphore_status(struct seq_file *m, void *unused) 3308 { 3309 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3310 struct drm_device *dev = &dev_priv->drm; 3311 struct intel_engine_cs *engine; 3312 int num_rings = INTEL_INFO(dev_priv)->num_rings; 3313 enum intel_engine_id id; 3314 int j, ret; 3315 3316 if (!i915_modparams.semaphores) { 3317 seq_puts(m, "Semaphores are disabled\n"); 3318 return 0; 3319 } 3320 3321 ret = mutex_lock_interruptible(&dev->struct_mutex); 3322 if (ret) 3323 return ret; 3324 intel_runtime_pm_get(dev_priv); 3325 3326 if (IS_BROADWELL(dev_priv)) { 3327 struct page *page; 3328 uint64_t *seqno; 3329 3330 page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0); 3331 3332 seqno = (uint64_t *)kmap_atomic(page); 3333 for_each_engine(engine, dev_priv, id) { 3334 uint64_t offset; 3335 3336 seq_printf(m, "%s\n", engine->name); 3337 3338 seq_puts(m, " Last signal:"); 3339 for (j = 0; j < num_rings; j++) { 3340 offset = id * I915_NUM_ENGINES + j; 3341 seq_printf(m, "0x%08llx (0x%02llx) ", 3342 seqno[offset], offset * 8); 3343 } 3344 seq_putc(m, '\n'); 3345 3346 seq_puts(m, " Last wait: "); 3347 for (j = 0; j < num_rings; j++) { 3348 offset = id + (j * I915_NUM_ENGINES); 3349 seq_printf(m, "0x%08llx (0x%02llx) ", 3350 seqno[offset], offset * 8); 3351 } 3352 seq_putc(m, '\n'); 3353 3354 } 3355 kunmap_atomic(seqno); 3356 } else { 3357 seq_puts(m, " Last signal:"); 3358 for_each_engine(engine, dev_priv, id) 3359 for (j = 0; j < num_rings; j++) 3360 seq_printf(m, "0x%08x\n", 3361 I915_READ(engine->semaphore.mbox.signal[j])); 3362 seq_putc(m, '\n'); 3363 } 3364 3365 intel_runtime_pm_put(dev_priv); 3366 mutex_unlock(&dev->struct_mutex); 3367 return 0; 3368 } 3369 3370 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 3371 { 3372 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3373 struct drm_device *dev = &dev_priv->drm; 3374 int i; 3375 3376 drm_modeset_lock_all(dev); 3377 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3378 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 3379 3380 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 3381 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", 3382 pll->state.crtc_mask, pll->active_mask, yesno(pll->on)); 3383 seq_printf(m, " tracked hardware state:\n"); 3384 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); 3385 seq_printf(m, " dpll_md: 0x%08x\n", 3386 pll->state.hw_state.dpll_md); 3387 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); 3388 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); 3389 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); 3390 } 3391 drm_modeset_unlock_all(dev); 3392 3393 return 0; 3394 } 3395 3396 static int i915_wa_registers(struct seq_file *m, void *unused) 3397 { 
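	/*
	 * Dump the workarounds recorded at init time and cross-check each one
	 * against the hardware: a register is reported "OK" when the value we
	 * programmed and the value read back agree under the recorded mask.
	 */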
3398 int i; 3399 int ret; 3400 struct intel_engine_cs *engine; 3401 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3402 struct drm_device *dev = &dev_priv->drm; 3403 struct i915_workarounds *workarounds = &dev_priv->workarounds; 3404 enum intel_engine_id id; 3405 3406 ret = mutex_lock_interruptible(&dev->struct_mutex); 3407 if (ret) 3408 return ret; 3409 3410 intel_runtime_pm_get(dev_priv); 3411 3412 seq_printf(m, "Workarounds applied: %d\n", workarounds->count); 3413 for_each_engine(engine, dev_priv, id) 3414 seq_printf(m, "HW whitelist count for %s: %d\n", 3415 engine->name, workarounds->hw_whitelist_count[id]); 3416 for (i = 0; i < workarounds->count; ++i) { 3417 i915_reg_t addr; 3418 u32 mask, value, read; 3419 bool ok; 3420 3421 addr = workarounds->reg[i].addr; 3422 mask = workarounds->reg[i].mask; 3423 value = workarounds->reg[i].value; 3424 read = I915_READ(addr); 3425 ok = (value & mask) == (read & mask); 3426 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 3427 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL"); 3428 } 3429 3430 intel_runtime_pm_put(dev_priv); 3431 mutex_unlock(&dev->struct_mutex); 3432 3433 return 0; 3434 } 3435 3436 static int i915_ipc_status_show(struct seq_file *m, void *data) 3437 { 3438 struct drm_i915_private *dev_priv = m->private; 3439 3440 seq_printf(m, "Isochronous Priority Control: %s\n", 3441 yesno(dev_priv->ipc_enabled)); 3442 return 0; 3443 } 3444 3445 static int i915_ipc_status_open(struct inode *inode, struct file *file) 3446 { 3447 struct drm_i915_private *dev_priv = inode->i_private; 3448 3449 if (!HAS_IPC(dev_priv)) 3450 return -ENODEV; 3451 3452 return single_open(file, i915_ipc_status_show, dev_priv); 3453 } 3454 3455 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, 3456 size_t len, loff_t *offp) 3457 { 3458 struct seq_file *m = file->private_data; 3459 struct drm_i915_private *dev_priv = m->private; 3460 int ret; 3461 bool enable; 3462 3463 ret = kstrtobool_from_user(ubuf, len, &enable); 3464 if (ret < 0) 3465 return ret; 3466 3467 intel_runtime_pm_get(dev_priv); 3468 if (!dev_priv->ipc_enabled && enable) 3469 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); 3470 dev_priv->wm.distrust_bios_wm = true; 3471 dev_priv->ipc_enabled = enable; 3472 intel_enable_ipc(dev_priv); 3473 intel_runtime_pm_put(dev_priv); 3474 3475 return len; 3476 } 3477 3478 static const struct file_operations i915_ipc_status_fops = { 3479 .owner = THIS_MODULE, 3480 .open = i915_ipc_status_open, 3481 .read = seq_read, 3482 .llseek = seq_lseek, 3483 .release = single_release, 3484 .write = i915_ipc_status_write 3485 }; 3486 3487 static int i915_ddb_info(struct seq_file *m, void *unused) 3488 { 3489 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3490 struct drm_device *dev = &dev_priv->drm; 3491 struct skl_ddb_allocation *ddb; 3492 struct skl_ddb_entry *entry; 3493 enum pipe pipe; 3494 int plane; 3495 3496 if (INTEL_GEN(dev_priv) < 9) 3497 return 0; 3498 3499 drm_modeset_lock_all(dev); 3500 3501 ddb = &dev_priv->wm.skl_hw.ddb; 3502 3503 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 3504 3505 for_each_pipe(dev_priv, pipe) { 3506 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 3507 3508 for_each_universal_plane(dev_priv, pipe, plane) { 3509 entry = &ddb->plane[pipe][plane]; 3510 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, 3511 entry->start, entry->end, 3512 skl_ddb_entry_size(entry)); 3513 } 3514 3515 entry = 
&ddb->plane[pipe][PLANE_CURSOR]; 3516 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3517 entry->end, skl_ddb_entry_size(entry)); 3518 } 3519 3520 drm_modeset_unlock_all(dev); 3521 3522 return 0; 3523 } 3524 3525 static void drrs_status_per_crtc(struct seq_file *m, 3526 struct drm_device *dev, 3527 struct intel_crtc *intel_crtc) 3528 { 3529 struct drm_i915_private *dev_priv = to_i915(dev); 3530 struct i915_drrs *drrs = &dev_priv->drrs; 3531 int vrefresh = 0; 3532 struct drm_connector *connector; 3533 struct drm_connector_list_iter conn_iter; 3534 3535 drm_connector_list_iter_begin(dev, &conn_iter); 3536 drm_for_each_connector_iter(connector, &conn_iter) { 3537 if (connector->state->crtc != &intel_crtc->base) 3538 continue; 3539 3540 seq_printf(m, "%s:\n", connector->name); 3541 } 3542 drm_connector_list_iter_end(&conn_iter); 3543 3544 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) 3545 seq_puts(m, "\tVBT: DRRS_type: Static"); 3546 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) 3547 seq_puts(m, "\tVBT: DRRS_type: Seamless"); 3548 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED) 3549 seq_puts(m, "\tVBT: DRRS_type: None"); 3550 else 3551 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); 3552 3553 seq_puts(m, "\n\n"); 3554 3555 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { 3556 struct intel_panel *panel; 3557 3558 mutex_lock(&drrs->mutex); 3559 /* DRRS Supported */ 3560 seq_puts(m, "\tDRRS Supported: Yes\n"); 3561 3562 /* disable_drrs() will make drrs->dp NULL */ 3563 if (!drrs->dp) { 3564 seq_puts(m, "Idleness DRRS: Disabled"); 3565 mutex_unlock(&drrs->mutex); 3566 return; 3567 } 3568 3569 panel = &drrs->dp->attached_connector->panel; 3570 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", 3571 drrs->busy_frontbuffer_bits); 3572 3573 seq_puts(m, "\n\t\t"); 3574 if (drrs->refresh_rate_type == DRRS_HIGH_RR) { 3575 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); 3576 vrefresh = panel->fixed_mode->vrefresh; 3577 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { 3578 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); 3579 vrefresh = panel->downclock_mode->vrefresh; 3580 } else { 3581 seq_printf(m, "DRRS_State: Unknown(%d)\n", 3582 drrs->refresh_rate_type); 3583 mutex_unlock(&drrs->mutex); 3584 return; 3585 } 3586 seq_printf(m, "\t\tVrefresh: %d", vrefresh); 3587 3588 seq_puts(m, "\n\t\t"); 3589 mutex_unlock(&drrs->mutex); 3590 } else { 3591 /* DRRS not supported. 
Print the VBT parameter*/ 3592 seq_puts(m, "\tDRRS Supported : No"); 3593 } 3594 seq_puts(m, "\n"); 3595 } 3596 3597 static int i915_drrs_status(struct seq_file *m, void *unused) 3598 { 3599 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3600 struct drm_device *dev = &dev_priv->drm; 3601 struct intel_crtc *intel_crtc; 3602 int active_crtc_cnt = 0; 3603 3604 drm_modeset_lock_all(dev); 3605 for_each_intel_crtc(dev, intel_crtc) { 3606 if (intel_crtc->base.state->active) { 3607 active_crtc_cnt++; 3608 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); 3609 3610 drrs_status_per_crtc(m, dev, intel_crtc); 3611 } 3612 } 3613 drm_modeset_unlock_all(dev); 3614 3615 if (!active_crtc_cnt) 3616 seq_puts(m, "No active crtc found\n"); 3617 3618 return 0; 3619 } 3620 3621 static int i915_dp_mst_info(struct seq_file *m, void *unused) 3622 { 3623 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3624 struct drm_device *dev = &dev_priv->drm; 3625 struct intel_encoder *intel_encoder; 3626 struct intel_digital_port *intel_dig_port; 3627 struct drm_connector *connector; 3628 struct drm_connector_list_iter conn_iter; 3629 3630 drm_connector_list_iter_begin(dev, &conn_iter); 3631 drm_for_each_connector_iter(connector, &conn_iter) { 3632 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 3633 continue; 3634 3635 intel_encoder = intel_attached_encoder(connector); 3636 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) 3637 continue; 3638 3639 intel_dig_port = enc_to_dig_port(&intel_encoder->base); 3640 if (!intel_dig_port->dp.can_mst) 3641 continue; 3642 3643 seq_printf(m, "MST Source Port %c\n", 3644 port_name(intel_dig_port->base.port)); 3645 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 3646 } 3647 drm_connector_list_iter_end(&conn_iter); 3648 3649 return 0; 3650 } 3651 3652 static ssize_t i915_displayport_test_active_write(struct file *file, 3653 const char __user *ubuf, 3654 size_t len, loff_t *offp) 3655 { 3656 char *input_buffer; 3657 int status = 0; 3658 struct drm_device *dev; 3659 struct drm_connector *connector; 3660 struct drm_connector_list_iter conn_iter; 3661 struct intel_dp *intel_dp; 3662 int val = 0; 3663 3664 dev = ((struct seq_file *)file->private_data)->private; 3665 3666 if (len == 0) 3667 return 0; 3668 3669 input_buffer = memdup_user_nul(ubuf, len); 3670 if (IS_ERR(input_buffer)) 3671 return PTR_ERR(input_buffer); 3672 3673 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); 3674 3675 drm_connector_list_iter_begin(dev, &conn_iter); 3676 drm_for_each_connector_iter(connector, &conn_iter) { 3677 struct intel_encoder *encoder; 3678 3679 if (connector->connector_type != 3680 DRM_MODE_CONNECTOR_DisplayPort) 3681 continue; 3682 3683 encoder = to_intel_encoder(connector->encoder); 3684 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3685 continue; 3686 3687 if (encoder && connector->status == connector_status_connected) { 3688 intel_dp = enc_to_intel_dp(&encoder->base); 3689 status = kstrtoint(input_buffer, 10, &val); 3690 if (status < 0) 3691 break; 3692 DRM_DEBUG_DRIVER("Got %d for test active\n", val); 3693 /* To prevent erroneous activation of the compliance 3694 * testing code, only accept an actual value of 1 here 3695 */ 3696 if (val == 1) 3697 intel_dp->compliance.test_active = 1; 3698 else 3699 intel_dp->compliance.test_active = 0; 3700 } 3701 } 3702 drm_connector_list_iter_end(&conn_iter); 3703 kfree(input_buffer); 3704 if (status < 0) 3705 return status; 3706 3707 *offp += len; 3708 return len; 3709 } 
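/*
 * A userspace DP compliance harness typically drives this interface by
 * writing "1" to the debugfs file and reading it back, roughly as follows
 * (debugfs path assumed, usually under the card's dri directory):
 *
 *	echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
 *	cat /sys/kernel/debug/dri/0/i915_dp_test_active
 *
 * Only a literal 1 arms compliance testing; any other value clears
 * test_active, as enforced in the write handler above.
 */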
3710 3711 static int i915_displayport_test_active_show(struct seq_file *m, void *data) 3712 { 3713 struct drm_device *dev = m->private; 3714 struct drm_connector *connector; 3715 struct drm_connector_list_iter conn_iter; 3716 struct intel_dp *intel_dp; 3717 3718 drm_connector_list_iter_begin(dev, &conn_iter); 3719 drm_for_each_connector_iter(connector, &conn_iter) { 3720 struct intel_encoder *encoder; 3721 3722 if (connector->connector_type != 3723 DRM_MODE_CONNECTOR_DisplayPort) 3724 continue; 3725 3726 encoder = to_intel_encoder(connector->encoder); 3727 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3728 continue; 3729 3730 if (encoder && connector->status == connector_status_connected) { 3731 intel_dp = enc_to_intel_dp(&encoder->base); 3732 if (intel_dp->compliance.test_active) 3733 seq_puts(m, "1"); 3734 else 3735 seq_puts(m, "0"); 3736 } else 3737 seq_puts(m, "0"); 3738 } 3739 drm_connector_list_iter_end(&conn_iter); 3740 3741 return 0; 3742 } 3743 3744 static int i915_displayport_test_active_open(struct inode *inode, 3745 struct file *file) 3746 { 3747 struct drm_i915_private *dev_priv = inode->i_private; 3748 3749 return single_open(file, i915_displayport_test_active_show, 3750 &dev_priv->drm); 3751 } 3752 3753 static const struct file_operations i915_displayport_test_active_fops = { 3754 .owner = THIS_MODULE, 3755 .open = i915_displayport_test_active_open, 3756 .read = seq_read, 3757 .llseek = seq_lseek, 3758 .release = single_release, 3759 .write = i915_displayport_test_active_write 3760 }; 3761 3762 static int i915_displayport_test_data_show(struct seq_file *m, void *data) 3763 { 3764 struct drm_device *dev = m->private; 3765 struct drm_connector *connector; 3766 struct drm_connector_list_iter conn_iter; 3767 struct intel_dp *intel_dp; 3768 3769 drm_connector_list_iter_begin(dev, &conn_iter); 3770 drm_for_each_connector_iter(connector, &conn_iter) { 3771 struct intel_encoder *encoder; 3772 3773 if (connector->connector_type != 3774 DRM_MODE_CONNECTOR_DisplayPort) 3775 continue; 3776 3777 encoder = to_intel_encoder(connector->encoder); 3778 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3779 continue; 3780 3781 if (encoder && connector->status == connector_status_connected) { 3782 intel_dp = enc_to_intel_dp(&encoder->base); 3783 if (intel_dp->compliance.test_type == 3784 DP_TEST_LINK_EDID_READ) 3785 seq_printf(m, "%lx", 3786 intel_dp->compliance.test_data.edid); 3787 else if (intel_dp->compliance.test_type == 3788 DP_TEST_LINK_VIDEO_PATTERN) { 3789 seq_printf(m, "hdisplay: %d\n", 3790 intel_dp->compliance.test_data.hdisplay); 3791 seq_printf(m, "vdisplay: %d\n", 3792 intel_dp->compliance.test_data.vdisplay); 3793 seq_printf(m, "bpc: %u\n", 3794 intel_dp->compliance.test_data.bpc); 3795 } 3796 } else 3797 seq_puts(m, "0"); 3798 } 3799 drm_connector_list_iter_end(&conn_iter); 3800 3801 return 0; 3802 } 3803 static int i915_displayport_test_data_open(struct inode *inode, 3804 struct file *file) 3805 { 3806 struct drm_i915_private *dev_priv = inode->i_private; 3807 3808 return single_open(file, i915_displayport_test_data_show, 3809 &dev_priv->drm); 3810 } 3811 3812 static const struct file_operations i915_displayport_test_data_fops = { 3813 .owner = THIS_MODULE, 3814 .open = i915_displayport_test_data_open, 3815 .read = seq_read, 3816 .llseek = seq_lseek, 3817 .release = single_release 3818 }; 3819 3820 static int i915_displayport_test_type_show(struct seq_file *m, void *data) 3821 { 3822 struct drm_device *dev = m->private; 3823 struct drm_connector *connector; 3824 
struct drm_connector_list_iter conn_iter; 3825 struct intel_dp *intel_dp; 3826 3827 drm_connector_list_iter_begin(dev, &conn_iter); 3828 drm_for_each_connector_iter(connector, &conn_iter) { 3829 struct intel_encoder *encoder; 3830 3831 if (connector->connector_type != 3832 DRM_MODE_CONNECTOR_DisplayPort) 3833 continue; 3834 3835 encoder = to_intel_encoder(connector->encoder); 3836 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3837 continue; 3838 3839 if (encoder && connector->status == connector_status_connected) { 3840 intel_dp = enc_to_intel_dp(&encoder->base); 3841 seq_printf(m, "%02lx", intel_dp->compliance.test_type); 3842 } else 3843 seq_puts(m, "0"); 3844 } 3845 drm_connector_list_iter_end(&conn_iter); 3846 3847 return 0; 3848 } 3849 3850 static int i915_displayport_test_type_open(struct inode *inode, 3851 struct file *file) 3852 { 3853 struct drm_i915_private *dev_priv = inode->i_private; 3854 3855 return single_open(file, i915_displayport_test_type_show, 3856 &dev_priv->drm); 3857 } 3858 3859 static const struct file_operations i915_displayport_test_type_fops = { 3860 .owner = THIS_MODULE, 3861 .open = i915_displayport_test_type_open, 3862 .read = seq_read, 3863 .llseek = seq_lseek, 3864 .release = single_release 3865 }; 3866 3867 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) 3868 { 3869 struct drm_i915_private *dev_priv = m->private; 3870 struct drm_device *dev = &dev_priv->drm; 3871 int level; 3872 int num_levels; 3873 3874 if (IS_CHERRYVIEW(dev_priv)) 3875 num_levels = 3; 3876 else if (IS_VALLEYVIEW(dev_priv)) 3877 num_levels = 1; 3878 else if (IS_G4X(dev_priv)) 3879 num_levels = 3; 3880 else 3881 num_levels = ilk_wm_max_level(dev_priv) + 1; 3882 3883 drm_modeset_lock_all(dev); 3884 3885 for (level = 0; level < num_levels; level++) { 3886 unsigned int latency = wm[level]; 3887 3888 /* 3889 * - WM1+ latency values in 0.5us units 3890 * - latencies are in us on gen9/vlv/chv 3891 */ 3892 if (INTEL_GEN(dev_priv) >= 9 || 3893 IS_VALLEYVIEW(dev_priv) || 3894 IS_CHERRYVIEW(dev_priv) || 3895 IS_G4X(dev_priv)) 3896 latency *= 10; 3897 else if (level > 0) 3898 latency *= 5; 3899 3900 seq_printf(m, "WM%d %u (%u.%u usec)\n", 3901 level, wm[level], latency / 10, latency % 10); 3902 } 3903 3904 drm_modeset_unlock_all(dev); 3905 } 3906 3907 static int pri_wm_latency_show(struct seq_file *m, void *data) 3908 { 3909 struct drm_i915_private *dev_priv = m->private; 3910 const uint16_t *latencies; 3911 3912 if (INTEL_GEN(dev_priv) >= 9) 3913 latencies = dev_priv->wm.skl_latency; 3914 else 3915 latencies = dev_priv->wm.pri_latency; 3916 3917 wm_latency_show(m, latencies); 3918 3919 return 0; 3920 } 3921 3922 static int spr_wm_latency_show(struct seq_file *m, void *data) 3923 { 3924 struct drm_i915_private *dev_priv = m->private; 3925 const uint16_t *latencies; 3926 3927 if (INTEL_GEN(dev_priv) >= 9) 3928 latencies = dev_priv->wm.skl_latency; 3929 else 3930 latencies = dev_priv->wm.spr_latency; 3931 3932 wm_latency_show(m, latencies); 3933 3934 return 0; 3935 } 3936 3937 static int cur_wm_latency_show(struct seq_file *m, void *data) 3938 { 3939 struct drm_i915_private *dev_priv = m->private; 3940 const uint16_t *latencies; 3941 3942 if (INTEL_GEN(dev_priv) >= 9) 3943 latencies = dev_priv->wm.skl_latency; 3944 else 3945 latencies = dev_priv->wm.cur_latency; 3946 3947 wm_latency_show(m, latencies); 3948 3949 return 0; 3950 } 3951 3952 static int pri_wm_latency_open(struct inode *inode, struct file *file) 3953 { 3954 struct drm_i915_private *dev_priv = inode->i_private; 
3955 3956 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 3957 return -ENODEV; 3958 3959 return single_open(file, pri_wm_latency_show, dev_priv); 3960 } 3961 3962 static int spr_wm_latency_open(struct inode *inode, struct file *file) 3963 { 3964 struct drm_i915_private *dev_priv = inode->i_private; 3965 3966 if (HAS_GMCH_DISPLAY(dev_priv)) 3967 return -ENODEV; 3968 3969 return single_open(file, spr_wm_latency_show, dev_priv); 3970 } 3971 3972 static int cur_wm_latency_open(struct inode *inode, struct file *file) 3973 { 3974 struct drm_i915_private *dev_priv = inode->i_private; 3975 3976 if (HAS_GMCH_DISPLAY(dev_priv)) 3977 return -ENODEV; 3978 3979 return single_open(file, cur_wm_latency_show, dev_priv); 3980 } 3981 3982 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 3983 size_t len, loff_t *offp, uint16_t wm[8]) 3984 { 3985 struct seq_file *m = file->private_data; 3986 struct drm_i915_private *dev_priv = m->private; 3987 struct drm_device *dev = &dev_priv->drm; 3988 uint16_t new[8] = { 0 }; 3989 int num_levels; 3990 int level; 3991 int ret; 3992 char tmp[32]; 3993 3994 if (IS_CHERRYVIEW(dev_priv)) 3995 num_levels = 3; 3996 else if (IS_VALLEYVIEW(dev_priv)) 3997 num_levels = 1; 3998 else if (IS_G4X(dev_priv)) 3999 num_levels = 3; 4000 else 4001 num_levels = ilk_wm_max_level(dev_priv) + 1; 4002 4003 if (len >= sizeof(tmp)) 4004 return -EINVAL; 4005 4006 if (copy_from_user(tmp, ubuf, len)) 4007 return -EFAULT; 4008 4009 tmp[len] = '\0'; 4010 4011 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 4012 &new[0], &new[1], &new[2], &new[3], 4013 &new[4], &new[5], &new[6], &new[7]); 4014 if (ret != num_levels) 4015 return -EINVAL; 4016 4017 drm_modeset_lock_all(dev); 4018 4019 for (level = 0; level < num_levels; level++) 4020 wm[level] = new[level]; 4021 4022 drm_modeset_unlock_all(dev); 4023 4024 return len; 4025 } 4026 4027 4028 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 4029 size_t len, loff_t *offp) 4030 { 4031 struct seq_file *m = file->private_data; 4032 struct drm_i915_private *dev_priv = m->private; 4033 uint16_t *latencies; 4034 4035 if (INTEL_GEN(dev_priv) >= 9) 4036 latencies = dev_priv->wm.skl_latency; 4037 else 4038 latencies = dev_priv->wm.pri_latency; 4039 4040 return wm_latency_write(file, ubuf, len, offp, latencies); 4041 } 4042 4043 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 4044 size_t len, loff_t *offp) 4045 { 4046 struct seq_file *m = file->private_data; 4047 struct drm_i915_private *dev_priv = m->private; 4048 uint16_t *latencies; 4049 4050 if (INTEL_GEN(dev_priv) >= 9) 4051 latencies = dev_priv->wm.skl_latency; 4052 else 4053 latencies = dev_priv->wm.spr_latency; 4054 4055 return wm_latency_write(file, ubuf, len, offp, latencies); 4056 } 4057 4058 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 4059 size_t len, loff_t *offp) 4060 { 4061 struct seq_file *m = file->private_data; 4062 struct drm_i915_private *dev_priv = m->private; 4063 uint16_t *latencies; 4064 4065 if (INTEL_GEN(dev_priv) >= 9) 4066 latencies = dev_priv->wm.skl_latency; 4067 else 4068 latencies = dev_priv->wm.cur_latency; 4069 4070 return wm_latency_write(file, ubuf, len, offp, latencies); 4071 } 4072 4073 static const struct file_operations i915_pri_wm_latency_fops = { 4074 .owner = THIS_MODULE, 4075 .open = pri_wm_latency_open, 4076 .read = seq_read, 4077 .llseek = seq_lseek, 4078 .release = single_release, 4079 .write = pri_wm_latency_write 4080 }; 4081 4082 static 
const struct file_operations i915_spr_wm_latency_fops = { 4083 .owner = THIS_MODULE, 4084 .open = spr_wm_latency_open, 4085 .read = seq_read, 4086 .llseek = seq_lseek, 4087 .release = single_release, 4088 .write = spr_wm_latency_write 4089 }; 4090 4091 static const struct file_operations i915_cur_wm_latency_fops = { 4092 .owner = THIS_MODULE, 4093 .open = cur_wm_latency_open, 4094 .read = seq_read, 4095 .llseek = seq_lseek, 4096 .release = single_release, 4097 .write = cur_wm_latency_write 4098 }; 4099 4100 static int 4101 i915_wedged_get(void *data, u64 *val) 4102 { 4103 struct drm_i915_private *dev_priv = data; 4104 4105 *val = i915_terminally_wedged(&dev_priv->gpu_error); 4106 4107 return 0; 4108 } 4109 4110 static int 4111 i915_wedged_set(void *data, u64 val) 4112 { 4113 struct drm_i915_private *i915 = data; 4114 struct intel_engine_cs *engine; 4115 unsigned int tmp; 4116 4117 /* 4118 * There is no safeguard against this debugfs entry colliding 4119 * with the hangcheck calling same i915_handle_error() in 4120 * parallel, causing an explosion. For now we assume that the 4121 * test harness is responsible enough not to inject gpu hangs 4122 * while it is writing to 'i915_wedged' 4123 */ 4124 4125 if (i915_reset_backoff(&i915->gpu_error)) 4126 return -EAGAIN; 4127 4128 for_each_engine_masked(engine, i915, val, tmp) { 4129 engine->hangcheck.seqno = intel_engine_get_seqno(engine); 4130 engine->hangcheck.stalled = true; 4131 } 4132 4133 i915_handle_error(i915, val, "Manually setting wedged to %llu", val); 4134 4135 wait_on_bit(&i915->gpu_error.flags, 4136 I915_RESET_HANDOFF, 4137 TASK_UNINTERRUPTIBLE); 4138 4139 return 0; 4140 } 4141 4142 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 4143 i915_wedged_get, i915_wedged_set, 4144 "%llu\n"); 4145 4146 static int 4147 fault_irq_set(struct drm_i915_private *i915, 4148 unsigned long *irq, 4149 unsigned long val) 4150 { 4151 int err; 4152 4153 err = mutex_lock_interruptible(&i915->drm.struct_mutex); 4154 if (err) 4155 return err; 4156 4157 err = i915_gem_wait_for_idle(i915, 4158 I915_WAIT_LOCKED | 4159 I915_WAIT_INTERRUPTIBLE); 4160 if (err) 4161 goto err_unlock; 4162 4163 *irq = val; 4164 mutex_unlock(&i915->drm.struct_mutex); 4165 4166 /* Flush idle worker to disarm irq */ 4167 drain_delayed_work(&i915->gt.idle_work); 4168 4169 return 0; 4170 4171 err_unlock: 4172 mutex_unlock(&i915->drm.struct_mutex); 4173 return err; 4174 } 4175 4176 static int 4177 i915_ring_missed_irq_get(void *data, u64 *val) 4178 { 4179 struct drm_i915_private *dev_priv = data; 4180 4181 *val = dev_priv->gpu_error.missed_irq_rings; 4182 return 0; 4183 } 4184 4185 static int 4186 i915_ring_missed_irq_set(void *data, u64 val) 4187 { 4188 struct drm_i915_private *i915 = data; 4189 4190 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val); 4191 } 4192 4193 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 4194 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 4195 "0x%08llx\n"); 4196 4197 static int 4198 i915_ring_test_irq_get(void *data, u64 *val) 4199 { 4200 struct drm_i915_private *dev_priv = data; 4201 4202 *val = dev_priv->gpu_error.test_irq_rings; 4203 4204 return 0; 4205 } 4206 4207 static int 4208 i915_ring_test_irq_set(void *data, u64 val) 4209 { 4210 struct drm_i915_private *i915 = data; 4211 4212 val &= INTEL_INFO(i915)->ring_mask; 4213 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 4214 4215 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val); 4216 } 4217 4218 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 4219 
i915_ring_test_irq_get, i915_ring_test_irq_set, 4220 "0x%08llx\n"); 4221 4222 #define DROP_UNBOUND BIT(0) 4223 #define DROP_BOUND BIT(1) 4224 #define DROP_RETIRE BIT(2) 4225 #define DROP_ACTIVE BIT(3) 4226 #define DROP_FREED BIT(4) 4227 #define DROP_SHRINK_ALL BIT(5) 4228 #define DROP_IDLE BIT(6) 4229 #define DROP_ALL (DROP_UNBOUND | \ 4230 DROP_BOUND | \ 4231 DROP_RETIRE | \ 4232 DROP_ACTIVE | \ 4233 DROP_FREED | \ 4234 DROP_SHRINK_ALL |\ 4235 DROP_IDLE) 4236 static int 4237 i915_drop_caches_get(void *data, u64 *val) 4238 { 4239 *val = DROP_ALL; 4240 4241 return 0; 4242 } 4243 4244 static int 4245 i915_drop_caches_set(void *data, u64 val) 4246 { 4247 struct drm_i915_private *dev_priv = data; 4248 struct drm_device *dev = &dev_priv->drm; 4249 int ret = 0; 4250 4251 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", 4252 val, val & DROP_ALL); 4253 4254 /* No need to check and wait for gpu resets, only libdrm auto-restarts 4255 * on ioctls on -EAGAIN. */ 4256 if (val & (DROP_ACTIVE | DROP_RETIRE)) { 4257 ret = mutex_lock_interruptible(&dev->struct_mutex); 4258 if (ret) 4259 return ret; 4260 4261 if (val & DROP_ACTIVE) 4262 ret = i915_gem_wait_for_idle(dev_priv, 4263 I915_WAIT_INTERRUPTIBLE | 4264 I915_WAIT_LOCKED); 4265 4266 if (val & DROP_RETIRE) 4267 i915_gem_retire_requests(dev_priv); 4268 4269 mutex_unlock(&dev->struct_mutex); 4270 } 4271 4272 fs_reclaim_acquire(GFP_KERNEL); 4273 if (val & DROP_BOUND) 4274 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND); 4275 4276 if (val & DROP_UNBOUND) 4277 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND); 4278 4279 if (val & DROP_SHRINK_ALL) 4280 i915_gem_shrink_all(dev_priv); 4281 fs_reclaim_release(GFP_KERNEL); 4282 4283 if (val & DROP_IDLE) 4284 drain_delayed_work(&dev_priv->gt.idle_work); 4285 4286 if (val & DROP_FREED) { 4287 synchronize_rcu(); 4288 i915_gem_drain_freed_objects(dev_priv); 4289 } 4290 4291 return ret; 4292 } 4293 4294 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 4295 i915_drop_caches_get, i915_drop_caches_set, 4296 "0x%08llx\n"); 4297 4298 static int 4299 i915_max_freq_get(void *data, u64 *val) 4300 { 4301 struct drm_i915_private *dev_priv = data; 4302 4303 if (INTEL_GEN(dev_priv) < 6) 4304 return -ENODEV; 4305 4306 *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit); 4307 return 0; 4308 } 4309 4310 static int 4311 i915_max_freq_set(void *data, u64 val) 4312 { 4313 struct drm_i915_private *dev_priv = data; 4314 struct intel_rps *rps = &dev_priv->gt_pm.rps; 4315 u32 hw_max, hw_min; 4316 int ret; 4317 4318 if (INTEL_GEN(dev_priv) < 6) 4319 return -ENODEV; 4320 4321 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 4322 4323 ret = mutex_lock_interruptible(&dev_priv->pcu_lock); 4324 if (ret) 4325 return ret; 4326 4327 /* 4328 * Turbo will still be enabled, but won't go above the set value. 
4329 */ 4330 val = intel_freq_opcode(dev_priv, val); 4331 4332 hw_max = rps->max_freq; 4333 hw_min = rps->min_freq; 4334 4335 if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) { 4336 mutex_unlock(&dev_priv->pcu_lock); 4337 return -EINVAL; 4338 } 4339 4340 rps->max_freq_softlimit = val; 4341 4342 if (intel_set_rps(dev_priv, val)) 4343 DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); 4344 4345 mutex_unlock(&dev_priv->pcu_lock); 4346 4347 return 0; 4348 } 4349 4350 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 4351 i915_max_freq_get, i915_max_freq_set, 4352 "%llu\n"); 4353 4354 static int 4355 i915_min_freq_get(void *data, u64 *val) 4356 { 4357 struct drm_i915_private *dev_priv = data; 4358 4359 if (INTEL_GEN(dev_priv) < 6) 4360 return -ENODEV; 4361 4362 *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit); 4363 return 0; 4364 } 4365 4366 static int 4367 i915_min_freq_set(void *data, u64 val) 4368 { 4369 struct drm_i915_private *dev_priv = data; 4370 struct intel_rps *rps = &dev_priv->gt_pm.rps; 4371 u32 hw_max, hw_min; 4372 int ret; 4373 4374 if (INTEL_GEN(dev_priv) < 6) 4375 return -ENODEV; 4376 4377 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 4378 4379 ret = mutex_lock_interruptible(&dev_priv->pcu_lock); 4380 if (ret) 4381 return ret; 4382 4383 /* 4384 * Turbo will still be enabled, but won't go below the set value. 4385 */ 4386 val = intel_freq_opcode(dev_priv, val); 4387 4388 hw_max = rps->max_freq; 4389 hw_min = rps->min_freq; 4390 4391 if (val < hw_min || 4392 val > hw_max || val > rps->max_freq_softlimit) { 4393 mutex_unlock(&dev_priv->pcu_lock); 4394 return -EINVAL; 4395 } 4396 4397 rps->min_freq_softlimit = val; 4398 4399 if (intel_set_rps(dev_priv, val)) 4400 DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); 4401 4402 mutex_unlock(&dev_priv->pcu_lock); 4403 4404 return 0; 4405 } 4406 4407 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 4408 i915_min_freq_get, i915_min_freq_set, 4409 "%llu\n"); 4410 4411 static int 4412 i915_cache_sharing_get(void *data, u64 *val) 4413 { 4414 struct drm_i915_private *dev_priv = data; 4415 u32 snpcr; 4416 4417 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv))) 4418 return -ENODEV; 4419 4420 intel_runtime_pm_get(dev_priv); 4421 4422 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 4423 4424 intel_runtime_pm_put(dev_priv); 4425 4426 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 4427 4428 return 0; 4429 } 4430 4431 static int 4432 i915_cache_sharing_set(void *data, u64 val) 4433 { 4434 struct drm_i915_private *dev_priv = data; 4435 u32 snpcr; 4436 4437 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv))) 4438 return -ENODEV; 4439 4440 if (val > 3) 4441 return -EINVAL; 4442 4443 intel_runtime_pm_get(dev_priv); 4444 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 4445 4446 /* Update the cache sharing policy here as well */ 4447 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 4448 snpcr &= ~GEN6_MBC_SNPCR_MASK; 4449 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 4450 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 4451 4452 intel_runtime_pm_put(dev_priv); 4453 return 0; 4454 } 4455 4456 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 4457 i915_cache_sharing_get, i915_cache_sharing_set, 4458 "%llu\n"); 4459 4460 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv, 4461 struct sseu_dev_info *sseu) 4462 { 4463 int ss_max = 2; 4464 int ss; 4465 u32 sig1[ss_max], sig2[ss_max]; 4466 4467 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); 4468 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); 
4469 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); 4470 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); 4471 4472 for (ss = 0; ss < ss_max; ss++) { 4473 unsigned int eu_cnt; 4474 4475 if (sig1[ss] & CHV_SS_PG_ENABLE) 4476 /* skip disabled subslice */ 4477 continue; 4478 4479 sseu->slice_mask = BIT(0); 4480 sseu->subslice_mask |= BIT(ss); 4481 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + 4482 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + 4483 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + 4484 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2); 4485 sseu->eu_total += eu_cnt; 4486 sseu->eu_per_subslice = max_t(unsigned int, 4487 sseu->eu_per_subslice, eu_cnt); 4488 } 4489 } 4490 4491 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, 4492 struct sseu_dev_info *sseu) 4493 { 4494 const struct intel_device_info *info = INTEL_INFO(dev_priv); 4495 int s_max = 6, ss_max = 4; 4496 int s, ss; 4497 u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2]; 4498 4499 for (s = 0; s < s_max; s++) { 4500 /* 4501 * FIXME: Valid SS Mask respects the spec and reads 4502 * only valid bits for those registers, excluding reserved 4503 * bits, although this seems wrong because it would leave many 4504 * subslices without ACK. 4505 */ 4506 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) & 4507 GEN10_PGCTL_VALID_SS_MASK(s); 4508 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s)); 4509 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s)); 4510 } 4511 4512 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | 4513 GEN9_PGCTL_SSA_EU19_ACK | 4514 GEN9_PGCTL_SSA_EU210_ACK | 4515 GEN9_PGCTL_SSA_EU311_ACK; 4516 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | 4517 GEN9_PGCTL_SSB_EU19_ACK | 4518 GEN9_PGCTL_SSB_EU210_ACK | 4519 GEN9_PGCTL_SSB_EU311_ACK; 4520 4521 for (s = 0; s < s_max; s++) { 4522 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) 4523 /* skip disabled slice */ 4524 continue; 4525 4526 sseu->slice_mask |= BIT(s); 4527 sseu->subslice_mask = info->sseu.subslice_mask; 4528 4529 for (ss = 0; ss < ss_max; ss++) { 4530 unsigned int eu_cnt; 4531 4532 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) 4533 /* skip disabled subslice */ 4534 continue; 4535 4536 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] & 4537 eu_mask[ss % 2]); 4538 sseu->eu_total += eu_cnt; 4539 sseu->eu_per_subslice = max_t(unsigned int, 4540 sseu->eu_per_subslice, 4541 eu_cnt); 4542 } 4543 } 4544 } 4545 4546 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, 4547 struct sseu_dev_info *sseu) 4548 { 4549 int s_max = 3, ss_max = 4; 4550 int s, ss; 4551 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; 4552 4553 /* BXT has a single slice and at most 3 subslices. 
*/ 4554 if (IS_GEN9_LP(dev_priv)) { 4555 s_max = 1; 4556 ss_max = 3; 4557 } 4558 4559 for (s = 0; s < s_max; s++) { 4560 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s)); 4561 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s)); 4562 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s)); 4563 } 4564 4565 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | 4566 GEN9_PGCTL_SSA_EU19_ACK | 4567 GEN9_PGCTL_SSA_EU210_ACK | 4568 GEN9_PGCTL_SSA_EU311_ACK; 4569 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | 4570 GEN9_PGCTL_SSB_EU19_ACK | 4571 GEN9_PGCTL_SSB_EU210_ACK | 4572 GEN9_PGCTL_SSB_EU311_ACK; 4573 4574 for (s = 0; s < s_max; s++) { 4575 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) 4576 /* skip disabled slice */ 4577 continue; 4578 4579 sseu->slice_mask |= BIT(s); 4580 4581 if (IS_GEN9_BC(dev_priv)) 4582 sseu->subslice_mask = 4583 INTEL_INFO(dev_priv)->sseu.subslice_mask; 4584 4585 for (ss = 0; ss < ss_max; ss++) { 4586 unsigned int eu_cnt; 4587 4588 if (IS_GEN9_LP(dev_priv)) { 4589 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) 4590 /* skip disabled subslice */ 4591 continue; 4592 4593 sseu->subslice_mask |= BIT(ss); 4594 } 4595 4596 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & 4597 eu_mask[ss%2]); 4598 sseu->eu_total += eu_cnt; 4599 sseu->eu_per_subslice = max_t(unsigned int, 4600 sseu->eu_per_subslice, 4601 eu_cnt); 4602 } 4603 } 4604 } 4605 4606 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, 4607 struct sseu_dev_info *sseu) 4608 { 4609 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); 4610 int s; 4611 4612 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; 4613 4614 if (sseu->slice_mask) { 4615 sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask; 4616 sseu->eu_per_subslice = 4617 INTEL_INFO(dev_priv)->sseu.eu_per_subslice; 4618 sseu->eu_total = sseu->eu_per_subslice * 4619 sseu_subslice_total(sseu); 4620 4621 /* subtract fused off EU(s) from enabled slice(s) */ 4622 for (s = 0; s < fls(sseu->slice_mask); s++) { 4623 u8 subslice_7eu = 4624 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s]; 4625 4626 sseu->eu_total -= hweight8(subslice_7eu); 4627 } 4628 } 4629 } 4630 4631 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, 4632 const struct sseu_dev_info *sseu) 4633 { 4634 struct drm_i915_private *dev_priv = node_to_i915(m->private); 4635 const char *type = is_available_info ? 
"Available" : "Enabled"; 4636 4637 seq_printf(m, " %s Slice Mask: %04x\n", type, 4638 sseu->slice_mask); 4639 seq_printf(m, " %s Slice Total: %u\n", type, 4640 hweight8(sseu->slice_mask)); 4641 seq_printf(m, " %s Subslice Total: %u\n", type, 4642 sseu_subslice_total(sseu)); 4643 seq_printf(m, " %s Subslice Mask: %04x\n", type, 4644 sseu->subslice_mask); 4645 seq_printf(m, " %s Subslice Per Slice: %u\n", type, 4646 hweight8(sseu->subslice_mask)); 4647 seq_printf(m, " %s EU Total: %u\n", type, 4648 sseu->eu_total); 4649 seq_printf(m, " %s EU Per Subslice: %u\n", type, 4650 sseu->eu_per_subslice); 4651 4652 if (!is_available_info) 4653 return; 4654 4655 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv))); 4656 if (HAS_POOLED_EU(dev_priv)) 4657 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool); 4658 4659 seq_printf(m, " Has Slice Power Gating: %s\n", 4660 yesno(sseu->has_slice_pg)); 4661 seq_printf(m, " Has Subslice Power Gating: %s\n", 4662 yesno(sseu->has_subslice_pg)); 4663 seq_printf(m, " Has EU Power Gating: %s\n", 4664 yesno(sseu->has_eu_pg)); 4665 } 4666 4667 static int i915_sseu_status(struct seq_file *m, void *unused) 4668 { 4669 struct drm_i915_private *dev_priv = node_to_i915(m->private); 4670 struct sseu_dev_info sseu; 4671 4672 if (INTEL_GEN(dev_priv) < 8) 4673 return -ENODEV; 4674 4675 seq_puts(m, "SSEU Device Info\n"); 4676 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu); 4677 4678 seq_puts(m, "SSEU Device Status\n"); 4679 memset(&sseu, 0, sizeof(sseu)); 4680 4681 intel_runtime_pm_get(dev_priv); 4682 4683 if (IS_CHERRYVIEW(dev_priv)) { 4684 cherryview_sseu_device_status(dev_priv, &sseu); 4685 } else if (IS_BROADWELL(dev_priv)) { 4686 broadwell_sseu_device_status(dev_priv, &sseu); 4687 } else if (IS_GEN9(dev_priv)) { 4688 gen9_sseu_device_status(dev_priv, &sseu); 4689 } else if (INTEL_GEN(dev_priv) >= 10) { 4690 gen10_sseu_device_status(dev_priv, &sseu); 4691 } 4692 4693 intel_runtime_pm_put(dev_priv); 4694 4695 i915_print_sseu_info(m, false, &sseu); 4696 4697 return 0; 4698 } 4699 4700 static int i915_forcewake_open(struct inode *inode, struct file *file) 4701 { 4702 struct drm_i915_private *i915 = inode->i_private; 4703 4704 if (INTEL_GEN(i915) < 6) 4705 return 0; 4706 4707 intel_runtime_pm_get(i915); 4708 intel_uncore_forcewake_user_get(i915); 4709 4710 return 0; 4711 } 4712 4713 static int i915_forcewake_release(struct inode *inode, struct file *file) 4714 { 4715 struct drm_i915_private *i915 = inode->i_private; 4716 4717 if (INTEL_GEN(i915) < 6) 4718 return 0; 4719 4720 intel_uncore_forcewake_user_put(i915); 4721 intel_runtime_pm_put(i915); 4722 4723 return 0; 4724 } 4725 4726 static const struct file_operations i915_forcewake_fops = { 4727 .owner = THIS_MODULE, 4728 .open = i915_forcewake_open, 4729 .release = i915_forcewake_release, 4730 }; 4731 4732 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) 4733 { 4734 struct drm_i915_private *dev_priv = m->private; 4735 struct i915_hotplug *hotplug = &dev_priv->hotplug; 4736 4737 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); 4738 seq_printf(m, "Detected: %s\n", 4739 yesno(delayed_work_pending(&hotplug->reenable_work))); 4740 4741 return 0; 4742 } 4743 4744 static ssize_t i915_hpd_storm_ctl_write(struct file *file, 4745 const char __user *ubuf, size_t len, 4746 loff_t *offp) 4747 { 4748 struct seq_file *m = file->private_data; 4749 struct drm_i915_private *dev_priv = m->private; 4750 struct i915_hotplug *hotplug = &dev_priv->hotplug; 4751 unsigned int 
new_threshold; 4752 int i; 4753 char *newline; 4754 char tmp[16]; 4755 4756 if (len >= sizeof(tmp)) 4757 return -EINVAL; 4758 4759 if (copy_from_user(tmp, ubuf, len)) 4760 return -EFAULT; 4761 4762 tmp[len] = '\0'; 4763 4764 /* Strip newline, if any */ 4765 newline = strchr(tmp, '\n'); 4766 if (newline) 4767 *newline = '\0'; 4768 4769 if (strcmp(tmp, "reset") == 0) 4770 new_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4771 else if (kstrtouint(tmp, 10, &new_threshold) != 0) 4772 return -EINVAL; 4773 4774 if (new_threshold > 0) 4775 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n", 4776 new_threshold); 4777 else 4778 DRM_DEBUG_KMS("Disabling HPD storm detection\n"); 4779 4780 spin_lock_irq(&dev_priv->irq_lock); 4781 hotplug->hpd_storm_threshold = new_threshold; 4782 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 4783 for_each_hpd_pin(i) 4784 hotplug->stats[i].count = 0; 4785 spin_unlock_irq(&dev_priv->irq_lock); 4786 4787 /* Re-enable hpd immediately if we were in an irq storm */ 4788 flush_delayed_work(&dev_priv->hotplug.reenable_work); 4789 4790 return len; 4791 } 4792 4793 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file) 4794 { 4795 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private); 4796 } 4797 4798 static const struct file_operations i915_hpd_storm_ctl_fops = { 4799 .owner = THIS_MODULE, 4800 .open = i915_hpd_storm_ctl_open, 4801 .read = seq_read, 4802 .llseek = seq_lseek, 4803 .release = single_release, 4804 .write = i915_hpd_storm_ctl_write 4805 }; 4806 4807 static const struct drm_info_list i915_debugfs_list[] = { 4808 {"i915_capabilities", i915_capabilities, 0}, 4809 {"i915_gem_objects", i915_gem_object_info, 0}, 4810 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 4811 {"i915_gem_stolen", i915_gem_stolen_list_info }, 4812 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 4813 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 4814 {"i915_gem_interrupt", i915_interrupt_info, 0}, 4815 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, 4816 {"i915_guc_info", i915_guc_info, 0}, 4817 {"i915_guc_load_status", i915_guc_load_status_info, 0}, 4818 {"i915_guc_log_dump", i915_guc_log_dump, 0}, 4819 {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1}, 4820 {"i915_guc_stage_pool", i915_guc_stage_pool, 0}, 4821 {"i915_huc_load_status", i915_huc_load_status_info, 0}, 4822 {"i915_frequency_info", i915_frequency_info, 0}, 4823 {"i915_hangcheck_info", i915_hangcheck_info, 0}, 4824 {"i915_reset_info", i915_reset_info, 0}, 4825 {"i915_drpc_info", i915_drpc_info, 0}, 4826 {"i915_emon_status", i915_emon_status, 0}, 4827 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 4828 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 4829 {"i915_fbc_status", i915_fbc_status, 0}, 4830 {"i915_ips_status", i915_ips_status, 0}, 4831 {"i915_sr_status", i915_sr_status, 0}, 4832 {"i915_opregion", i915_opregion, 0}, 4833 {"i915_vbt", i915_vbt, 0}, 4834 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 4835 {"i915_context_status", i915_context_status, 0}, 4836 {"i915_dump_lrc", i915_dump_lrc, 0}, 4837 {"i915_forcewake_domains", i915_forcewake_domains, 0}, 4838 {"i915_swizzle_info", i915_swizzle_info, 0}, 4839 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 4840 {"i915_llc", i915_llc, 0}, 4841 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 4842 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 4843 {"i915_energy_uJ", i915_energy_uJ, 0}, 4844 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 4845 {"i915_power_domain_info", 
i915_power_domain_info, 0}, 4846 {"i915_dmc_info", i915_dmc_info, 0}, 4847 {"i915_display_info", i915_display_info, 0}, 4848 {"i915_engine_info", i915_engine_info, 0}, 4849 {"i915_shrinker_info", i915_shrinker_info, 0}, 4850 {"i915_semaphore_status", i915_semaphore_status, 0}, 4851 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 4852 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 4853 {"i915_wa_registers", i915_wa_registers, 0}, 4854 {"i915_ddb_info", i915_ddb_info, 0}, 4855 {"i915_sseu_status", i915_sseu_status, 0}, 4856 {"i915_drrs_status", i915_drrs_status, 0}, 4857 {"i915_rps_boost_info", i915_rps_boost_info, 0}, 4858 }; 4859 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 4860 4861 static const struct i915_debugfs_files { 4862 const char *name; 4863 const struct file_operations *fops; 4864 } i915_debugfs_files[] = { 4865 {"i915_wedged", &i915_wedged_fops}, 4866 {"i915_max_freq", &i915_max_freq_fops}, 4867 {"i915_min_freq", &i915_min_freq_fops}, 4868 {"i915_cache_sharing", &i915_cache_sharing_fops}, 4869 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 4870 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 4871 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 4872 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 4873 {"i915_error_state", &i915_error_state_fops}, 4874 {"i915_gpu_info", &i915_gpu_info_fops}, 4875 #endif 4876 {"i915_next_seqno", &i915_next_seqno_fops}, 4877 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 4878 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 4879 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 4880 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 4881 {"i915_fbc_false_color", &i915_fbc_false_color_fops}, 4882 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 4883 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 4884 {"i915_dp_test_active", &i915_displayport_test_active_fops}, 4885 {"i915_guc_log_control", &i915_guc_log_control_fops}, 4886 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, 4887 {"i915_ipc_status", &i915_ipc_status_fops} 4888 }; 4889 4890 int i915_debugfs_register(struct drm_i915_private *dev_priv) 4891 { 4892 struct drm_minor *minor = dev_priv->drm.primary; 4893 struct dentry *ent; 4894 int ret, i; 4895 4896 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR, 4897 minor->debugfs_root, to_i915(minor->dev), 4898 &i915_forcewake_fops); 4899 if (!ent) 4900 return -ENOMEM; 4901 4902 ret = intel_pipe_crc_create(minor); 4903 if (ret) 4904 return ret; 4905 4906 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 4907 ent = debugfs_create_file(i915_debugfs_files[i].name, 4908 S_IRUGO | S_IWUSR, 4909 minor->debugfs_root, 4910 to_i915(minor->dev), 4911 i915_debugfs_files[i].fops); 4912 if (!ent) 4913 return -ENOMEM; 4914 } 4915 4916 return drm_debugfs_create_files(i915_debugfs_list, 4917 I915_DEBUGFS_ENTRIES, 4918 minor->debugfs_root, minor); 4919 } 4920 4921 struct dpcd_block { 4922 /* DPCD dump start address. */ 4923 unsigned int offset; 4924 /* DPCD dump end address, inclusive. If unset, .size will be used. */ 4925 unsigned int end; 4926 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ 4927 size_t size; 4928 /* Only valid for eDP. 
*/ 4929 bool edp; 4930 }; 4931 4932 static const struct dpcd_block i915_dpcd_debug[] = { 4933 { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, 4934 { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, 4935 { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, 4936 { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, 4937 { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, 4938 { .offset = DP_SET_POWER }, 4939 { .offset = DP_EDP_DPCD_REV }, 4940 { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, 4941 { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, 4942 { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, 4943 }; 4944 4945 static int i915_dpcd_show(struct seq_file *m, void *data) 4946 { 4947 struct drm_connector *connector = m->private; 4948 struct intel_dp *intel_dp = 4949 enc_to_intel_dp(&intel_attached_encoder(connector)->base); 4950 uint8_t buf[16]; 4951 ssize_t err; 4952 int i; 4953 4954 if (connector->status != connector_status_connected) 4955 return -ENODEV; 4956 4957 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) { 4958 const struct dpcd_block *b = &i915_dpcd_debug[i]; 4959 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1); 4960 4961 if (b->edp && 4962 connector->connector_type != DRM_MODE_CONNECTOR_eDP) 4963 continue; 4964 4965 /* low tech for now */ 4966 if (WARN_ON(size > sizeof(buf))) 4967 continue; 4968 4969 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size); 4970 if (err <= 0) { 4971 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", 4972 size, b->offset, err); 4973 continue; 4974 } 4975 4976 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); 4977 } 4978 4979 return 0; 4980 } 4981 4982 static int i915_dpcd_open(struct inode *inode, struct file *file) 4983 { 4984 return single_open(file, i915_dpcd_show, inode->i_private); 4985 } 4986 4987 static const struct file_operations i915_dpcd_fops = { 4988 .owner = THIS_MODULE, 4989 .open = i915_dpcd_open, 4990 .read = seq_read, 4991 .llseek = seq_lseek, 4992 .release = single_release, 4993 }; 4994 4995 static int i915_panel_show(struct seq_file *m, void *data) 4996 { 4997 struct drm_connector *connector = m->private; 4998 struct intel_dp *intel_dp = 4999 enc_to_intel_dp(&intel_attached_encoder(connector)->base); 5000 5001 if (connector->status != connector_status_connected) 5002 return -ENODEV; 5003 5004 seq_printf(m, "Panel power up delay: %d\n", 5005 intel_dp->panel_power_up_delay); 5006 seq_printf(m, "Panel power down delay: %d\n", 5007 intel_dp->panel_power_down_delay); 5008 seq_printf(m, "Backlight on delay: %d\n", 5009 intel_dp->backlight_on_delay); 5010 seq_printf(m, "Backlight off delay: %d\n", 5011 intel_dp->backlight_off_delay); 5012 5013 return 0; 5014 } 5015 5016 static int i915_panel_open(struct inode *inode, struct file *file) 5017 { 5018 return single_open(file, i915_panel_show, inode->i_private); 5019 } 5020 5021 static const struct file_operations i915_panel_fops = { 5022 .owner = THIS_MODULE, 5023 .open = i915_panel_open, 5024 .read = seq_read, 5025 .llseek = seq_lseek, 5026 .release = single_release, 5027 }; 5028 5029 /** 5030 * i915_debugfs_connector_add - add i915 specific connector debugfs files 5031 * @connector: pointer to a registered drm_connector 5032 * 5033 * Cleanup will be done by drm_connector_unregister() through a call to 5034 * drm_debugfs_connector_remove(). 5035 * 5036 * Returns 0 on success, negative error codes on error. 
5037 */ 5038 int i915_debugfs_connector_add(struct drm_connector *connector) 5039 { 5040 struct dentry *root = connector->debugfs_entry; 5041 5042 /* The connector must have been registered beforehand. */ 5043 if (!root) 5044 return -ENODEV; 5045 5046 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || 5047 connector->connector_type == DRM_MODE_CONNECTOR_eDP) 5048 debugfs_create_file("i915_dpcd", S_IRUGO, root, 5049 connector, &i915_dpcd_fops); 5050 5051 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 5052 debugfs_create_file("i915_panel_timings", S_IRUGO, root, 5053 connector, &i915_panel_fops); 5054 5055 return 0; 5056 } 5057
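/*
 * Illustrative usage sketch (not part of the driver): the per-connector
 * files created by i915_debugfs_connector_add() are placed under the
 * connector's own debugfs directory, which the DRM core names after the
 * connector. Assuming DRM minor 0 and an eDP panel exposed as "eDP-1"
 * (the minor number and connector name are assumptions; the file names
 * come from the code above), reading them might look like:
 *
 *   cat /sys/kernel/debug/dri/0/eDP-1/i915_dpcd            # hex dump of selected DPCD blocks
 *   cat /sys/kernel/debug/dri/0/eDP-1/i915_panel_timings   # panel power and backlight delays
 *
 * Both reads fail with -ENODEV while the connector reports itself as
 * disconnected, matching the connector_status_connected checks in the
 * show functions above.
 */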