1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/circ_buf.h> 31 #include <linux/ctype.h> 32 #include <linux/debugfs.h> 33 #include <linux/slab.h> 34 #include <linux/export.h> 35 #include <linux/list_sort.h> 36 #include <asm/msr-index.h> 37 #include <drm/drmP.h> 38 #include "intel_drv.h" 39 #include "intel_ringbuffer.h" 40 #include <drm/i915_drm.h> 41 #include "i915_drv.h" 42 43 enum { 44 ACTIVE_LIST, 45 INACTIVE_LIST, 46 PINNED_LIST, 47 }; 48 49 /* As the drm_debugfs_init() routines are called before dev->dev_private is 50 * allocated we need to hook into the minor for release. */ 51 static int 52 drm_add_fake_info_node(struct drm_minor *minor, 53 struct dentry *ent, 54 const void *key) 55 { 56 struct drm_info_node *node; 57 58 node = kmalloc(sizeof(*node), GFP_KERNEL); 59 if (node == NULL) { 60 debugfs_remove(ent); 61 return -ENOMEM; 62 } 63 64 node->minor = minor; 65 node->dent = ent; 66 node->info_ent = (void *) key; 67 68 mutex_lock(&minor->debugfs_lock); 69 list_add(&node->list, &minor->debugfs_list); 70 mutex_unlock(&minor->debugfs_lock); 71 72 return 0; 73 } 74 75 static int i915_capabilities(struct seq_file *m, void *data) 76 { 77 struct drm_info_node *node = m->private; 78 struct drm_device *dev = node->minor->dev; 79 const struct intel_device_info *info = INTEL_INFO(dev); 80 81 seq_printf(m, "gen: %d\n", info->gen); 82 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 83 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 84 #define SEP_SEMICOLON ; 85 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON); 86 #undef PRINT_FLAG 87 #undef SEP_SEMICOLON 88 89 return 0; 90 } 91 92 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 93 { 94 if (obj->pin_display) 95 return "p"; 96 else 97 return " "; 98 } 99 100 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 101 { 102 switch (obj->tiling_mode) { 103 default: 104 case I915_TILING_NONE: return " "; 105 case I915_TILING_X: return "X"; 106 case I915_TILING_Y: return "Y"; 107 } 108 } 109 110 static inline const char *get_global_flag(struct drm_i915_gem_object *obj) 111 { 112 return i915_gem_obj_to_ggtt(obj) ? "g" : " "; 113 } 114 115 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) 116 { 117 u64 size = 0; 118 struct i915_vma *vma; 119 120 list_for_each_entry(vma, &obj->vma_list, vma_link) { 121 if (i915_is_ggtt(vma->vm) && 122 drm_mm_node_allocated(&vma->node)) 123 size += vma->node.size; 124 } 125 126 return size; 127 } 128 129 static void 130 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 131 { 132 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 133 struct intel_engine_cs *ring; 134 struct i915_vma *vma; 135 int pin_count = 0; 136 int i; 137 138 seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ", 139 &obj->base, 140 obj->active ? "*" : " ", 141 get_pin_flag(obj), 142 get_tiling_flag(obj), 143 get_global_flag(obj), 144 obj->base.size / 1024, 145 obj->base.read_domains, 146 obj->base.write_domain); 147 for_each_ring(ring, dev_priv, i) 148 seq_printf(m, "%x ", 149 i915_gem_request_get_seqno(obj->last_read_req[i])); 150 seq_printf(m, "] %x %x%s%s%s", 151 i915_gem_request_get_seqno(obj->last_write_req), 152 i915_gem_request_get_seqno(obj->last_fenced_req), 153 i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level), 154 obj->dirty ? " dirty" : "", 155 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 156 if (obj->base.name) 157 seq_printf(m, " (name: %d)", obj->base.name); 158 list_for_each_entry(vma, &obj->vma_list, vma_link) { 159 if (vma->pin_count > 0) 160 pin_count++; 161 } 162 seq_printf(m, " (pinned x %d)", pin_count); 163 if (obj->pin_display) 164 seq_printf(m, " (display)"); 165 if (obj->fence_reg != I915_FENCE_REG_NONE) 166 seq_printf(m, " (fence: %d)", obj->fence_reg); 167 list_for_each_entry(vma, &obj->vma_list, vma_link) { 168 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx", 169 i915_is_ggtt(vma->vm) ? "g" : "pp", 170 vma->node.start, vma->node.size); 171 if (i915_is_ggtt(vma->vm)) 172 seq_printf(m, ", type: %u)", vma->ggtt_view.type); 173 else 174 seq_puts(m, ")"); 175 } 176 if (obj->stolen) 177 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 178 if (obj->pin_display || obj->fault_mappable) { 179 char s[3], *t = s; 180 if (obj->pin_display) 181 *t++ = 'p'; 182 if (obj->fault_mappable) 183 *t++ = 'f'; 184 *t = '\0'; 185 seq_printf(m, " (%s mappable)", s); 186 } 187 if (obj->last_write_req != NULL) 188 seq_printf(m, " (%s)", 189 i915_gem_request_get_ring(obj->last_write_req)->name); 190 if (obj->frontbuffer_bits) 191 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); 192 } 193 194 static void describe_ctx(struct seq_file *m, struct intel_context *ctx) 195 { 196 seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i'); 197 seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 198 seq_putc(m, ' '); 199 } 200 201 static int i915_gem_object_list_info(struct seq_file *m, void *data) 202 { 203 struct drm_info_node *node = m->private; 204 uintptr_t list = (uintptr_t) node->info_ent->data; 205 struct list_head *head; 206 struct drm_device *dev = node->minor->dev; 207 struct drm_i915_private *dev_priv = dev->dev_private; 208 struct i915_address_space *vm = &dev_priv->gtt.base; 209 struct i915_vma *vma; 210 u64 total_obj_size, total_gtt_size; 211 int count, ret; 212 213 ret = mutex_lock_interruptible(&dev->struct_mutex); 214 if (ret) 215 return ret; 216 217 /* FIXME: the user of this interface might want more than just GGTT */ 218 switch (list) { 219 case ACTIVE_LIST: 220 seq_puts(m, "Active:\n"); 221 head = &vm->active_list; 222 break; 223 case INACTIVE_LIST: 224 seq_puts(m, "Inactive:\n"); 225 head = &vm->inactive_list; 226 break; 227 default: 228 mutex_unlock(&dev->struct_mutex); 229 return -EINVAL; 230 } 231 232 total_obj_size = total_gtt_size = count = 0; 233 list_for_each_entry(vma, head, mm_list) { 234 seq_printf(m, " "); 235 describe_obj(m, vma->obj); 236 seq_printf(m, "\n"); 237 total_obj_size += vma->obj->base.size; 238 total_gtt_size += vma->node.size; 239 count++; 240 } 241 mutex_unlock(&dev->struct_mutex); 242 243 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 244 count, total_obj_size, total_gtt_size); 245 return 0; 246 } 247 248 static int obj_rank_by_stolen(void *priv, 249 struct list_head *A, struct list_head *B) 250 { 251 struct drm_i915_gem_object *a = 252 container_of(A, struct drm_i915_gem_object, obj_exec_link); 253 struct drm_i915_gem_object *b = 254 container_of(B, struct drm_i915_gem_object, obj_exec_link); 255 256 if (a->stolen->start < b->stolen->start) 257 return -1; 258 if (a->stolen->start > b->stolen->start) 259 return 1; 260 return 0; 261 } 262 263 static int i915_gem_stolen_list_info(struct seq_file *m, void *data) 264 { 265 struct drm_info_node *node = m->private; 266 struct drm_device *dev = node->minor->dev; 267 struct drm_i915_private *dev_priv = dev->dev_private; 268 struct drm_i915_gem_object *obj; 269 u64 total_obj_size, total_gtt_size; 270 LIST_HEAD(stolen); 271 int count, ret; 272 273 ret = mutex_lock_interruptible(&dev->struct_mutex); 274 if (ret) 275 return ret; 276 277 total_obj_size = total_gtt_size = count = 0; 278 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 279 if (obj->stolen == NULL) 280 continue; 281 282 list_add(&obj->obj_exec_link, &stolen); 283 284 total_obj_size += obj->base.size; 285 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 286 count++; 287 } 288 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 289 if (obj->stolen == NULL) 290 continue; 291 292 list_add(&obj->obj_exec_link, &stolen); 293 294 total_obj_size += obj->base.size; 295 count++; 296 } 297 list_sort(NULL, &stolen, obj_rank_by_stolen); 298 seq_puts(m, "Stolen:\n"); 299 while (!list_empty(&stolen)) { 300 obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link); 301 seq_puts(m, " "); 302 describe_obj(m, obj); 303 seq_putc(m, '\n'); 304 list_del_init(&obj->obj_exec_link); 305 } 306 mutex_unlock(&dev->struct_mutex); 307 308 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 309 count, total_obj_size, total_gtt_size); 310 return 0; 311 } 312 313 #define count_objects(list, member) do { \ 314 list_for_each_entry(obj, list, member) { \ 315 size += i915_gem_obj_total_ggtt_size(obj); \ 316 ++count; \ 317 if (obj->map_and_fenceable) { \ 318 mappable_size += i915_gem_obj_ggtt_size(obj); \ 319 ++mappable_count; \ 320 } \ 321 } \ 322 } while (0) 323 324 struct file_stats { 325 struct drm_i915_file_private *file_priv; 326 unsigned long count; 327 u64 total, unbound; 328 u64 global, shared; 329 u64 active, inactive; 330 }; 331 332 static int per_file_stats(int id, void *ptr, void *data) 333 { 334 struct drm_i915_gem_object *obj = ptr; 335 struct file_stats *stats = data; 336 struct i915_vma *vma; 337 338 stats->count++; 339 stats->total += obj->base.size; 340 341 if (obj->base.name || obj->base.dma_buf) 342 stats->shared += obj->base.size; 343 344 if (USES_FULL_PPGTT(obj->base.dev)) { 345 list_for_each_entry(vma, &obj->vma_list, vma_link) { 346 struct i915_hw_ppgtt *ppgtt; 347 348 if (!drm_mm_node_allocated(&vma->node)) 349 continue; 350 351 if (i915_is_ggtt(vma->vm)) { 352 stats->global += obj->base.size; 353 continue; 354 } 355 356 ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base); 357 if (ppgtt->file_priv != stats->file_priv) 358 continue; 359 360 if (obj->active) /* XXX per-vma statistic */ 361 stats->active += obj->base.size; 362 else 363 stats->inactive += obj->base.size; 364 365 return 0; 366 } 367 } else { 368 if (i915_gem_obj_ggtt_bound(obj)) { 369 stats->global += obj->base.size; 370 if (obj->active) 371 stats->active += obj->base.size; 372 else 373 stats->inactive += obj->base.size; 374 return 0; 375 } 376 } 377 378 if (!list_empty(&obj->global_list)) 379 stats->unbound += obj->base.size; 380 381 return 0; 382 } 383 384 #define print_file_stats(m, name, stats) do { \ 385 if (stats.count) \ 386 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \ 387 name, \ 388 stats.count, \ 389 stats.total, \ 390 stats.active, \ 391 stats.inactive, \ 392 stats.global, \ 393 stats.shared, \ 394 stats.unbound); \ 395 } while (0) 396 397 static void print_batch_pool_stats(struct seq_file *m, 398 struct drm_i915_private *dev_priv) 399 { 400 struct drm_i915_gem_object *obj; 401 struct file_stats stats; 402 struct intel_engine_cs *ring; 403 int i, j; 404 405 memset(&stats, 0, sizeof(stats)); 406 407 for_each_ring(ring, dev_priv, i) { 408 for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) { 409 list_for_each_entry(obj, 410 &ring->batch_pool.cache_list[j], 411 batch_pool_link) 412 per_file_stats(0, obj, &stats); 413 } 414 } 415 416 print_file_stats(m, "[k]batch pool", stats); 417 } 418 419 #define count_vmas(list, member) do { \ 420 list_for_each_entry(vma, list, member) { \ 421 size += i915_gem_obj_total_ggtt_size(vma->obj); \ 422 ++count; \ 423 if (vma->obj->map_and_fenceable) { \ 424 mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ 425 ++mappable_count; \ 426 } \ 427 } \ 428 } while (0) 429 430 static int i915_gem_object_info(struct seq_file *m, void* data) 431 { 432 struct drm_info_node *node = m->private; 433 struct drm_device *dev = node->minor->dev; 434 struct drm_i915_private *dev_priv = dev->dev_private; 435 u32 count, mappable_count, purgeable_count; 436 u64 size, mappable_size, purgeable_size; 437 struct drm_i915_gem_object *obj; 438 struct i915_address_space *vm = &dev_priv->gtt.base; 439 struct drm_file *file; 440 struct i915_vma *vma; 441 int ret; 442 443 ret = mutex_lock_interruptible(&dev->struct_mutex); 444 if (ret) 445 return ret; 446 447 seq_printf(m, "%u objects, %zu bytes\n", 448 dev_priv->mm.object_count, 449 dev_priv->mm.object_memory); 450 451 size = count = mappable_size = mappable_count = 0; 452 count_objects(&dev_priv->mm.bound_list, global_list); 453 seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n", 454 count, mappable_count, size, mappable_size); 455 456 size = count = mappable_size = mappable_count = 0; 457 count_vmas(&vm->active_list, mm_list); 458 seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n", 459 count, mappable_count, size, mappable_size); 460 461 size = count = mappable_size = mappable_count = 0; 462 count_vmas(&vm->inactive_list, mm_list); 463 seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n", 464 count, mappable_count, size, mappable_size); 465 466 size = count = purgeable_size = purgeable_count = 0; 467 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 468 size += obj->base.size, ++count; 469 if (obj->madv == I915_MADV_DONTNEED) 470 purgeable_size += obj->base.size, ++purgeable_count; 471 } 472 seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); 473 474 size = count = mappable_size = mappable_count = 0; 475 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 476 if (obj->fault_mappable) { 477 size += i915_gem_obj_ggtt_size(obj); 478 ++count; 479 } 480 if (obj->pin_display) { 481 mappable_size += i915_gem_obj_ggtt_size(obj); 482 ++mappable_count; 483 } 484 if (obj->madv == I915_MADV_DONTNEED) { 485 purgeable_size += obj->base.size; 486 ++purgeable_count; 487 } 488 } 489 seq_printf(m, "%u purgeable objects, %llu bytes\n", 490 purgeable_count, purgeable_size); 491 seq_printf(m, "%u pinned mappable objects, %llu bytes\n", 492 mappable_count, mappable_size); 493 seq_printf(m, "%u fault mappable objects, %llu bytes\n", 494 count, size); 495 496 seq_printf(m, "%llu [%llu] gtt total\n", 497 dev_priv->gtt.base.total, 498 (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); 499 500 seq_putc(m, '\n'); 501 print_batch_pool_stats(m, dev_priv); 502 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 503 struct file_stats stats; 504 struct task_struct *task; 505 506 memset(&stats, 0, sizeof(stats)); 507 stats.file_priv = file->driver_priv; 508 spin_lock(&file->table_lock); 509 idr_for_each(&file->object_idr, per_file_stats, &stats); 510 spin_unlock(&file->table_lock); 511 /* 512 * Although we have a valid reference on file->pid, that does 513 * not guarantee that the task_struct who called get_pid() is 514 * still alive (e.g. get_pid(current) => fork() => exit()). 515 * Therefore, we need to protect this ->comm access using RCU. 516 */ 517 rcu_read_lock(); 518 task = pid_task(file->pid, PIDTYPE_PID); 519 print_file_stats(m, task ? task->comm : "<unknown>", stats); 520 rcu_read_unlock(); 521 } 522 523 mutex_unlock(&dev->struct_mutex); 524 525 return 0; 526 } 527 528 static int i915_gem_gtt_info(struct seq_file *m, void *data) 529 { 530 struct drm_info_node *node = m->private; 531 struct drm_device *dev = node->minor->dev; 532 uintptr_t list = (uintptr_t) node->info_ent->data; 533 struct drm_i915_private *dev_priv = dev->dev_private; 534 struct drm_i915_gem_object *obj; 535 u64 total_obj_size, total_gtt_size; 536 int count, ret; 537 538 ret = mutex_lock_interruptible(&dev->struct_mutex); 539 if (ret) 540 return ret; 541 542 total_obj_size = total_gtt_size = count = 0; 543 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 544 if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj)) 545 continue; 546 547 seq_puts(m, " "); 548 describe_obj(m, obj); 549 seq_putc(m, '\n'); 550 total_obj_size += obj->base.size; 551 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 552 count++; 553 } 554 555 mutex_unlock(&dev->struct_mutex); 556 557 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 558 count, total_obj_size, total_gtt_size); 559 560 return 0; 561 } 562 563 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 564 { 565 struct drm_info_node *node = m->private; 566 struct drm_device *dev = node->minor->dev; 567 struct drm_i915_private *dev_priv = dev->dev_private; 568 struct intel_crtc *crtc; 569 int ret; 570 571 ret = mutex_lock_interruptible(&dev->struct_mutex); 572 if (ret) 573 return ret; 574 575 for_each_intel_crtc(dev, crtc) { 576 const char pipe = pipe_name(crtc->pipe); 577 const char plane = plane_name(crtc->plane); 578 struct intel_unpin_work *work; 579 580 spin_lock_irq(&dev->event_lock); 581 work = crtc->unpin_work; 582 if (work == NULL) { 583 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 584 pipe, plane); 585 } else { 586 u32 addr; 587 588 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 589 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 590 pipe, plane); 591 } else { 592 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 593 pipe, plane); 594 } 595 if (work->flip_queued_req) { 596 struct intel_engine_cs *ring = 597 i915_gem_request_get_ring(work->flip_queued_req); 598 599 seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n", 600 ring->name, 601 i915_gem_request_get_seqno(work->flip_queued_req), 602 dev_priv->next_seqno, 603 ring->get_seqno(ring, true), 604 i915_gem_request_completed(work->flip_queued_req, true)); 605 } else 606 seq_printf(m, "Flip not associated with any ring\n"); 607 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", 608 work->flip_queued_vblank, 609 work->flip_ready_vblank, 610 drm_crtc_vblank_count(&crtc->base)); 611 if (work->enable_stall_check) 612 seq_puts(m, "Stall check enabled, "); 613 else 614 seq_puts(m, "Stall check waiting for page flip ioctl, "); 615 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 616 617 if (INTEL_INFO(dev)->gen >= 4) 618 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane))); 619 else 620 addr = I915_READ(DSPADDR(crtc->plane)); 621 seq_printf(m, "Current scanout address 0x%08x\n", addr); 622 623 if (work->pending_flip_obj) { 624 seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset); 625 seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset); 626 } 627 } 628 spin_unlock_irq(&dev->event_lock); 629 } 630 631 mutex_unlock(&dev->struct_mutex); 632 633 return 0; 634 } 635 636 static int i915_gem_batch_pool_info(struct seq_file *m, void *data) 637 { 638 struct drm_info_node *node = m->private; 639 struct drm_device *dev = node->minor->dev; 640 struct drm_i915_private *dev_priv = dev->dev_private; 641 struct drm_i915_gem_object *obj; 642 struct intel_engine_cs *ring; 643 int total = 0; 644 int ret, i, j; 645 646 ret = mutex_lock_interruptible(&dev->struct_mutex); 647 if (ret) 648 return ret; 649 650 for_each_ring(ring, dev_priv, i) { 651 for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) { 652 int count; 653 654 count = 0; 655 list_for_each_entry(obj, 656 &ring->batch_pool.cache_list[j], 657 batch_pool_link) 658 count++; 659 seq_printf(m, "%s cache[%d]: %d objects\n", 660 ring->name, j, count); 661 662 list_for_each_entry(obj, 663 &ring->batch_pool.cache_list[j], 664 batch_pool_link) { 665 seq_puts(m, " "); 666 describe_obj(m, obj); 667 seq_putc(m, '\n'); 668 } 669 670 total += count; 671 } 672 } 673 674 seq_printf(m, "total: %d\n", total); 675 676 mutex_unlock(&dev->struct_mutex); 677 678 return 0; 679 } 680 681 static int i915_gem_request_info(struct seq_file *m, void *data) 682 { 683 struct drm_info_node *node = m->private; 684 struct drm_device *dev = node->minor->dev; 685 struct drm_i915_private *dev_priv = dev->dev_private; 686 struct intel_engine_cs *ring; 687 struct drm_i915_gem_request *req; 688 int ret, any, i; 689 690 ret = mutex_lock_interruptible(&dev->struct_mutex); 691 if (ret) 692 return ret; 693 694 any = 0; 695 for_each_ring(ring, dev_priv, i) { 696 int count; 697 698 count = 0; 699 list_for_each_entry(req, &ring->request_list, list) 700 count++; 701 if (count == 0) 702 continue; 703 704 seq_printf(m, "%s requests: %d\n", ring->name, count); 705 list_for_each_entry(req, &ring->request_list, list) { 706 struct task_struct *task; 707 708 rcu_read_lock(); 709 task = NULL; 710 if (req->pid) 711 task = pid_task(req->pid, PIDTYPE_PID); 712 seq_printf(m, " %x @ %d: %s [%d]\n", 713 req->seqno, 714 (int) (jiffies - req->emitted_jiffies), 715 task ? task->comm : "<unknown>", 716 task ? task->pid : -1); 717 rcu_read_unlock(); 718 } 719 720 any++; 721 } 722 mutex_unlock(&dev->struct_mutex); 723 724 if (any == 0) 725 seq_puts(m, "No requests\n"); 726 727 return 0; 728 } 729 730 static void i915_ring_seqno_info(struct seq_file *m, 731 struct intel_engine_cs *ring) 732 { 733 if (ring->get_seqno) { 734 seq_printf(m, "Current sequence (%s): %x\n", 735 ring->name, ring->get_seqno(ring, false)); 736 } 737 } 738 739 static int i915_gem_seqno_info(struct seq_file *m, void *data) 740 { 741 struct drm_info_node *node = m->private; 742 struct drm_device *dev = node->minor->dev; 743 struct drm_i915_private *dev_priv = dev->dev_private; 744 struct intel_engine_cs *ring; 745 int ret, i; 746 747 ret = mutex_lock_interruptible(&dev->struct_mutex); 748 if (ret) 749 return ret; 750 intel_runtime_pm_get(dev_priv); 751 752 for_each_ring(ring, dev_priv, i) 753 i915_ring_seqno_info(m, ring); 754 755 intel_runtime_pm_put(dev_priv); 756 mutex_unlock(&dev->struct_mutex); 757 758 return 0; 759 } 760 761 762 static int i915_interrupt_info(struct seq_file *m, void *data) 763 { 764 struct drm_info_node *node = m->private; 765 struct drm_device *dev = node->minor->dev; 766 struct drm_i915_private *dev_priv = dev->dev_private; 767 struct intel_engine_cs *ring; 768 int ret, i, pipe; 769 770 ret = mutex_lock_interruptible(&dev->struct_mutex); 771 if (ret) 772 return ret; 773 intel_runtime_pm_get(dev_priv); 774 775 if (IS_CHERRYVIEW(dev)) { 776 seq_printf(m, "Master Interrupt Control:\t%08x\n", 777 I915_READ(GEN8_MASTER_IRQ)); 778 779 seq_printf(m, "Display IER:\t%08x\n", 780 I915_READ(VLV_IER)); 781 seq_printf(m, "Display IIR:\t%08x\n", 782 I915_READ(VLV_IIR)); 783 seq_printf(m, "Display IIR_RW:\t%08x\n", 784 I915_READ(VLV_IIR_RW)); 785 seq_printf(m, "Display IMR:\t%08x\n", 786 I915_READ(VLV_IMR)); 787 for_each_pipe(dev_priv, pipe) 788 seq_printf(m, "Pipe %c stat:\t%08x\n", 789 pipe_name(pipe), 790 I915_READ(PIPESTAT(pipe))); 791 792 seq_printf(m, "Port hotplug:\t%08x\n", 793 I915_READ(PORT_HOTPLUG_EN)); 794 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 795 I915_READ(VLV_DPFLIPSTAT)); 796 seq_printf(m, "DPINVGTT:\t%08x\n", 797 I915_READ(DPINVGTT)); 798 799 for (i = 0; i < 4; i++) { 800 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 801 i, I915_READ(GEN8_GT_IMR(i))); 802 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 803 i, I915_READ(GEN8_GT_IIR(i))); 804 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 805 i, I915_READ(GEN8_GT_IER(i))); 806 } 807 808 seq_printf(m, "PCU interrupt mask:\t%08x\n", 809 I915_READ(GEN8_PCU_IMR)); 810 seq_printf(m, "PCU interrupt identity:\t%08x\n", 811 I915_READ(GEN8_PCU_IIR)); 812 seq_printf(m, "PCU interrupt enable:\t%08x\n", 813 I915_READ(GEN8_PCU_IER)); 814 } else if (INTEL_INFO(dev)->gen >= 8) { 815 seq_printf(m, "Master Interrupt Control:\t%08x\n", 816 I915_READ(GEN8_MASTER_IRQ)); 817 818 for (i = 0; i < 4; i++) { 819 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 820 i, I915_READ(GEN8_GT_IMR(i))); 821 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 822 i, I915_READ(GEN8_GT_IIR(i))); 823 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 824 i, I915_READ(GEN8_GT_IER(i))); 825 } 826 827 for_each_pipe(dev_priv, pipe) { 828 enum intel_display_power_domain power_domain; 829 830 power_domain = POWER_DOMAIN_PIPE(pipe); 831 if (!intel_display_power_get_if_enabled(dev_priv, 832 power_domain)) { 833 seq_printf(m, "Pipe %c power disabled\n", 834 pipe_name(pipe)); 835 continue; 836 } 837 seq_printf(m, "Pipe %c IMR:\t%08x\n", 838 pipe_name(pipe), 839 I915_READ(GEN8_DE_PIPE_IMR(pipe))); 840 seq_printf(m, "Pipe %c IIR:\t%08x\n", 841 pipe_name(pipe), 842 I915_READ(GEN8_DE_PIPE_IIR(pipe))); 843 seq_printf(m, "Pipe %c IER:\t%08x\n", 844 pipe_name(pipe), 845 I915_READ(GEN8_DE_PIPE_IER(pipe))); 846 847 intel_display_power_put(dev_priv, power_domain); 848 } 849 850 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 851 I915_READ(GEN8_DE_PORT_IMR)); 852 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 853 I915_READ(GEN8_DE_PORT_IIR)); 854 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 855 I915_READ(GEN8_DE_PORT_IER)); 856 857 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 858 I915_READ(GEN8_DE_MISC_IMR)); 859 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 860 I915_READ(GEN8_DE_MISC_IIR)); 861 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 862 I915_READ(GEN8_DE_MISC_IER)); 863 864 seq_printf(m, "PCU interrupt mask:\t%08x\n", 865 I915_READ(GEN8_PCU_IMR)); 866 seq_printf(m, "PCU interrupt identity:\t%08x\n", 867 I915_READ(GEN8_PCU_IIR)); 868 seq_printf(m, "PCU interrupt enable:\t%08x\n", 869 I915_READ(GEN8_PCU_IER)); 870 } else if (IS_VALLEYVIEW(dev)) { 871 seq_printf(m, "Display IER:\t%08x\n", 872 I915_READ(VLV_IER)); 873 seq_printf(m, "Display IIR:\t%08x\n", 874 I915_READ(VLV_IIR)); 875 seq_printf(m, "Display IIR_RW:\t%08x\n", 876 I915_READ(VLV_IIR_RW)); 877 seq_printf(m, "Display IMR:\t%08x\n", 878 I915_READ(VLV_IMR)); 879 for_each_pipe(dev_priv, pipe) 880 seq_printf(m, "Pipe %c stat:\t%08x\n", 881 pipe_name(pipe), 882 I915_READ(PIPESTAT(pipe))); 883 884 seq_printf(m, "Master IER:\t%08x\n", 885 I915_READ(VLV_MASTER_IER)); 886 887 seq_printf(m, "Render IER:\t%08x\n", 888 I915_READ(GTIER)); 889 seq_printf(m, "Render IIR:\t%08x\n", 890 I915_READ(GTIIR)); 891 seq_printf(m, "Render IMR:\t%08x\n", 892 I915_READ(GTIMR)); 893 894 seq_printf(m, "PM IER:\t\t%08x\n", 895 I915_READ(GEN6_PMIER)); 896 seq_printf(m, "PM IIR:\t\t%08x\n", 897 I915_READ(GEN6_PMIIR)); 898 seq_printf(m, "PM IMR:\t\t%08x\n", 899 I915_READ(GEN6_PMIMR)); 900 901 seq_printf(m, "Port hotplug:\t%08x\n", 902 I915_READ(PORT_HOTPLUG_EN)); 903 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 904 I915_READ(VLV_DPFLIPSTAT)); 905 seq_printf(m, "DPINVGTT:\t%08x\n", 906 I915_READ(DPINVGTT)); 907 908 } else if (!HAS_PCH_SPLIT(dev)) { 909 seq_printf(m, "Interrupt enable: %08x\n", 910 I915_READ(IER)); 911 seq_printf(m, "Interrupt identity: %08x\n", 912 I915_READ(IIR)); 913 seq_printf(m, "Interrupt mask: %08x\n", 914 I915_READ(IMR)); 915 for_each_pipe(dev_priv, pipe) 916 seq_printf(m, "Pipe %c stat: %08x\n", 917 pipe_name(pipe), 918 I915_READ(PIPESTAT(pipe))); 919 } else { 920 seq_printf(m, "North Display Interrupt enable: %08x\n", 921 I915_READ(DEIER)); 922 seq_printf(m, "North Display Interrupt identity: %08x\n", 923 I915_READ(DEIIR)); 924 seq_printf(m, "North Display Interrupt mask: %08x\n", 925 I915_READ(DEIMR)); 926 seq_printf(m, "South Display Interrupt enable: %08x\n", 927 I915_READ(SDEIER)); 928 seq_printf(m, "South Display Interrupt identity: %08x\n", 929 I915_READ(SDEIIR)); 930 seq_printf(m, "South Display Interrupt mask: %08x\n", 931 I915_READ(SDEIMR)); 932 seq_printf(m, "Graphics Interrupt enable: %08x\n", 933 I915_READ(GTIER)); 934 seq_printf(m, "Graphics Interrupt identity: %08x\n", 935 I915_READ(GTIIR)); 936 seq_printf(m, "Graphics Interrupt mask: %08x\n", 937 I915_READ(GTIMR)); 938 } 939 for_each_ring(ring, dev_priv, i) { 940 if (INTEL_INFO(dev)->gen >= 6) { 941 seq_printf(m, 942 "Graphics Interrupt mask (%s): %08x\n", 943 ring->name, I915_READ_IMR(ring)); 944 } 945 i915_ring_seqno_info(m, ring); 946 } 947 intel_runtime_pm_put(dev_priv); 948 mutex_unlock(&dev->struct_mutex); 949 950 return 0; 951 } 952 953 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 954 { 955 struct drm_info_node *node = m->private; 956 struct drm_device *dev = node->minor->dev; 957 struct drm_i915_private *dev_priv = dev->dev_private; 958 int i, ret; 959 960 ret = mutex_lock_interruptible(&dev->struct_mutex); 961 if (ret) 962 return ret; 963 964 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 965 for (i = 0; i < dev_priv->num_fence_regs; i++) { 966 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 967 968 seq_printf(m, "Fence %d, pin count = %d, object = ", 969 i, dev_priv->fence_regs[i].pin_count); 970 if (obj == NULL) 971 seq_puts(m, "unused"); 972 else 973 describe_obj(m, obj); 974 seq_putc(m, '\n'); 975 } 976 977 mutex_unlock(&dev->struct_mutex); 978 return 0; 979 } 980 981 static int i915_hws_info(struct seq_file *m, void *data) 982 { 983 struct drm_info_node *node = m->private; 984 struct drm_device *dev = node->minor->dev; 985 struct drm_i915_private *dev_priv = dev->dev_private; 986 struct intel_engine_cs *ring; 987 const u32 *hws; 988 int i; 989 990 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 991 hws = ring->status_page.page_addr; 992 if (hws == NULL) 993 return 0; 994 995 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 996 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 997 i * 4, 998 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 999 } 1000 return 0; 1001 } 1002 1003 static ssize_t 1004 i915_error_state_write(struct file *filp, 1005 const char __user *ubuf, 1006 size_t cnt, 1007 loff_t *ppos) 1008 { 1009 struct i915_error_state_file_priv *error_priv = filp->private_data; 1010 struct drm_device *dev = error_priv->dev; 1011 int ret; 1012 1013 DRM_DEBUG_DRIVER("Resetting error state\n"); 1014 1015 ret = mutex_lock_interruptible(&dev->struct_mutex); 1016 if (ret) 1017 return ret; 1018 1019 i915_destroy_error_state(dev); 1020 mutex_unlock(&dev->struct_mutex); 1021 1022 return cnt; 1023 } 1024 1025 static int i915_error_state_open(struct inode *inode, struct file *file) 1026 { 1027 struct drm_device *dev = inode->i_private; 1028 struct i915_error_state_file_priv *error_priv; 1029 1030 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); 1031 if (!error_priv) 1032 return -ENOMEM; 1033 1034 error_priv->dev = dev; 1035 1036 i915_error_state_get(dev, error_priv); 1037 1038 file->private_data = error_priv; 1039 1040 return 0; 1041 } 1042 1043 static int i915_error_state_release(struct inode *inode, struct file *file) 1044 { 1045 struct i915_error_state_file_priv *error_priv = file->private_data; 1046 1047 i915_error_state_put(error_priv); 1048 kfree(error_priv); 1049 1050 return 0; 1051 } 1052 1053 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, 1054 size_t count, loff_t *pos) 1055 { 1056 struct i915_error_state_file_priv *error_priv = file->private_data; 1057 struct drm_i915_error_state_buf error_str; 1058 loff_t tmp_pos = 0; 1059 ssize_t ret_count = 0; 1060 int ret; 1061 1062 ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos); 1063 if (ret) 1064 return ret; 1065 1066 ret = i915_error_state_to_str(&error_str, error_priv); 1067 if (ret) 1068 goto out; 1069 1070 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos, 1071 error_str.buf, 1072 error_str.bytes); 1073 1074 if (ret_count < 0) 1075 ret = ret_count; 1076 else 1077 *pos = error_str.start + ret_count; 1078 out: 1079 i915_error_state_buf_release(&error_str); 1080 return ret ?: ret_count; 1081 } 1082 1083 static const struct file_operations i915_error_state_fops = { 1084 .owner = THIS_MODULE, 1085 .open = i915_error_state_open, 1086 .read = i915_error_state_read, 1087 .write = i915_error_state_write, 1088 .llseek = default_llseek, 1089 .release = i915_error_state_release, 1090 }; 1091 1092 static int 1093 i915_next_seqno_get(void *data, u64 *val) 1094 { 1095 struct drm_device *dev = data; 1096 struct drm_i915_private *dev_priv = dev->dev_private; 1097 int ret; 1098 1099 ret = mutex_lock_interruptible(&dev->struct_mutex); 1100 if (ret) 1101 return ret; 1102 1103 *val = dev_priv->next_seqno; 1104 mutex_unlock(&dev->struct_mutex); 1105 1106 return 0; 1107 } 1108 1109 static int 1110 i915_next_seqno_set(void *data, u64 val) 1111 { 1112 struct drm_device *dev = data; 1113 int ret; 1114 1115 ret = mutex_lock_interruptible(&dev->struct_mutex); 1116 if (ret) 1117 return ret; 1118 1119 ret = i915_gem_set_seqno(dev, val); 1120 mutex_unlock(&dev->struct_mutex); 1121 1122 return ret; 1123 } 1124 1125 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, 1126 i915_next_seqno_get, i915_next_seqno_set, 1127 "0x%llx\n"); 1128 1129 static int i915_frequency_info(struct seq_file *m, void *unused) 1130 { 1131 struct drm_info_node *node = m->private; 1132 struct drm_device *dev = node->minor->dev; 1133 struct drm_i915_private *dev_priv = dev->dev_private; 1134 int ret = 0; 1135 1136 intel_runtime_pm_get(dev_priv); 1137 1138 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1139 1140 if (IS_GEN5(dev)) { 1141 u16 rgvswctl = I915_READ16(MEMSWCTL); 1142 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 1143 1144 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 1145 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 1146 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 1147 MEMSTAT_VID_SHIFT); 1148 seq_printf(m, "Current P-state: %d\n", 1149 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 1150 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1151 u32 freq_sts; 1152 1153 mutex_lock(&dev_priv->rps.hw_lock); 1154 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 1155 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 1156 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 1157 1158 seq_printf(m, "actual GPU freq: %d MHz\n", 1159 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); 1160 1161 seq_printf(m, "current GPU freq: %d MHz\n", 1162 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); 1163 1164 seq_printf(m, "max GPU freq: %d MHz\n", 1165 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1166 1167 seq_printf(m, "min GPU freq: %d MHz\n", 1168 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); 1169 1170 seq_printf(m, "idle GPU freq: %d MHz\n", 1171 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); 1172 1173 seq_printf(m, 1174 "efficient (RPe) frequency: %d MHz\n", 1175 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); 1176 mutex_unlock(&dev_priv->rps.hw_lock); 1177 } else if (INTEL_INFO(dev)->gen >= 6) { 1178 u32 rp_state_limits; 1179 u32 gt_perf_status; 1180 u32 rp_state_cap; 1181 u32 rpmodectl, rpinclimit, rpdeclimit; 1182 u32 rpstat, cagf, reqf; 1183 u32 rpupei, rpcurup, rpprevup; 1184 u32 rpdownei, rpcurdown, rpprevdown; 1185 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 1186 int max_freq; 1187 1188 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 1189 if (IS_BROXTON(dev)) { 1190 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 1191 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS); 1192 } else { 1193 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 1194 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 1195 } 1196 1197 /* RPSTAT1 is in the GT power well */ 1198 ret = mutex_lock_interruptible(&dev->struct_mutex); 1199 if (ret) 1200 goto out; 1201 1202 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1203 1204 reqf = I915_READ(GEN6_RPNSWREQ); 1205 if (IS_GEN9(dev)) 1206 reqf >>= 23; 1207 else { 1208 reqf &= ~GEN6_TURBO_DISABLE; 1209 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1210 reqf >>= 24; 1211 else 1212 reqf >>= 25; 1213 } 1214 reqf = intel_gpu_freq(dev_priv, reqf); 1215 1216 rpmodectl = I915_READ(GEN6_RP_CONTROL); 1217 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); 1218 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); 1219 1220 rpstat = I915_READ(GEN6_RPSTAT1); 1221 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 1222 rpcurup = I915_READ(GEN6_RP_CUR_UP); 1223 rpprevup = I915_READ(GEN6_RP_PREV_UP); 1224 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 1225 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 1226 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 1227 if (IS_GEN9(dev)) 1228 cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; 1229 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1230 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 1231 else 1232 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 1233 cagf = intel_gpu_freq(dev_priv, cagf); 1234 1235 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1236 mutex_unlock(&dev->struct_mutex); 1237 1238 if (IS_GEN6(dev) || IS_GEN7(dev)) { 1239 pm_ier = I915_READ(GEN6_PMIER); 1240 pm_imr = I915_READ(GEN6_PMIMR); 1241 pm_isr = I915_READ(GEN6_PMISR); 1242 pm_iir = I915_READ(GEN6_PMIIR); 1243 pm_mask = I915_READ(GEN6_PMINTRMSK); 1244 } else { 1245 pm_ier = I915_READ(GEN8_GT_IER(2)); 1246 pm_imr = I915_READ(GEN8_GT_IMR(2)); 1247 pm_isr = I915_READ(GEN8_GT_ISR(2)); 1248 pm_iir = I915_READ(GEN8_GT_IIR(2)); 1249 pm_mask = I915_READ(GEN6_PMINTRMSK); 1250 } 1251 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1252 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); 1253 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1254 seq_printf(m, "Render p-state ratio: %d\n", 1255 (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8); 1256 seq_printf(m, "Render p-state VID: %d\n", 1257 gt_perf_status & 0xff); 1258 seq_printf(m, "Render p-state limit: %d\n", 1259 rp_state_limits & 0xff); 1260 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 1261 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); 1262 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); 1263 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 1264 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 1265 seq_printf(m, "CAGF: %dMHz\n", cagf); 1266 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 1267 GEN6_CURICONT_MASK); 1268 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 1269 GEN6_CURBSYTAVG_MASK); 1270 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 1271 GEN6_CURBSYTAVG_MASK); 1272 seq_printf(m, "Up threshold: %d%%\n", 1273 dev_priv->rps.up_threshold); 1274 1275 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 1276 GEN6_CURIAVG_MASK); 1277 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 1278 GEN6_CURBSYTAVG_MASK); 1279 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 1280 GEN6_CURBSYTAVG_MASK); 1281 seq_printf(m, "Down threshold: %d%%\n", 1282 dev_priv->rps.down_threshold); 1283 1284 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 : 1285 rp_state_cap >> 16) & 0xff; 1286 max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? 1287 GEN9_FREQ_SCALER : 1); 1288 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1289 intel_gpu_freq(dev_priv, max_freq)); 1290 1291 max_freq = (rp_state_cap & 0xff00) >> 8; 1292 max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? 1293 GEN9_FREQ_SCALER : 1); 1294 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1295 intel_gpu_freq(dev_priv, max_freq)); 1296 1297 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 : 1298 rp_state_cap >> 0) & 0xff; 1299 max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? 1300 GEN9_FREQ_SCALER : 1); 1301 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1302 intel_gpu_freq(dev_priv, max_freq)); 1303 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1304 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1305 1306 seq_printf(m, "Current freq: %d MHz\n", 1307 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); 1308 seq_printf(m, "Actual freq: %d MHz\n", cagf); 1309 seq_printf(m, "Idle freq: %d MHz\n", 1310 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); 1311 seq_printf(m, "Min freq: %d MHz\n", 1312 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); 1313 seq_printf(m, "Max freq: %d MHz\n", 1314 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1315 seq_printf(m, 1316 "efficient (RPe) frequency: %d MHz\n", 1317 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); 1318 } else { 1319 seq_puts(m, "no P-state info available\n"); 1320 } 1321 1322 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq); 1323 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); 1324 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); 1325 1326 out: 1327 intel_runtime_pm_put(dev_priv); 1328 return ret; 1329 } 1330 1331 static int i915_hangcheck_info(struct seq_file *m, void *unused) 1332 { 1333 struct drm_info_node *node = m->private; 1334 struct drm_device *dev = node->minor->dev; 1335 struct drm_i915_private *dev_priv = dev->dev_private; 1336 struct intel_engine_cs *ring; 1337 u64 acthd[I915_NUM_RINGS]; 1338 u32 seqno[I915_NUM_RINGS]; 1339 int i; 1340 1341 if (!i915.enable_hangcheck) { 1342 seq_printf(m, "Hangcheck disabled\n"); 1343 return 0; 1344 } 1345 1346 intel_runtime_pm_get(dev_priv); 1347 1348 for_each_ring(ring, dev_priv, i) { 1349 seqno[i] = ring->get_seqno(ring, false); 1350 acthd[i] = intel_ring_get_active_head(ring); 1351 } 1352 1353 intel_runtime_pm_put(dev_priv); 1354 1355 if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) { 1356 seq_printf(m, "Hangcheck active, fires in %dms\n", 1357 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - 1358 jiffies)); 1359 } else 1360 seq_printf(m, "Hangcheck inactive\n"); 1361 1362 for_each_ring(ring, dev_priv, i) { 1363 seq_printf(m, "%s:\n", ring->name); 1364 seq_printf(m, "\tseqno = %x [current %x]\n", 1365 ring->hangcheck.seqno, seqno[i]); 1366 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", 1367 (long long)ring->hangcheck.acthd, 1368 (long long)acthd[i]); 1369 seq_printf(m, "\tmax ACTHD = 0x%08llx\n", 1370 (long long)ring->hangcheck.max_acthd); 1371 seq_printf(m, "\tscore = %d\n", ring->hangcheck.score); 1372 seq_printf(m, "\taction = %d\n", ring->hangcheck.action); 1373 } 1374 1375 return 0; 1376 } 1377 1378 static int ironlake_drpc_info(struct seq_file *m) 1379 { 1380 struct drm_info_node *node = m->private; 1381 struct drm_device *dev = node->minor->dev; 1382 struct drm_i915_private *dev_priv = dev->dev_private; 1383 u32 rgvmodectl, rstdbyctl; 1384 u16 crstandvid; 1385 int ret; 1386 1387 ret = mutex_lock_interruptible(&dev->struct_mutex); 1388 if (ret) 1389 return ret; 1390 intel_runtime_pm_get(dev_priv); 1391 1392 rgvmodectl = I915_READ(MEMMODECTL); 1393 rstdbyctl = I915_READ(RSTDBYCTL); 1394 crstandvid = I915_READ16(CRSTANDVID); 1395 1396 intel_runtime_pm_put(dev_priv); 1397 mutex_unlock(&dev->struct_mutex); 1398 1399 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN)); 1400 seq_printf(m, "Boost freq: %d\n", 1401 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1402 MEMMODE_BOOST_FREQ_SHIFT); 1403 seq_printf(m, "HW control enabled: %s\n", 1404 yesno(rgvmodectl & MEMMODE_HWIDLE_EN)); 1405 seq_printf(m, "SW control enabled: %s\n", 1406 yesno(rgvmodectl & MEMMODE_SWMODE_EN)); 1407 seq_printf(m, "Gated voltage change: %s\n", 1408 yesno(rgvmodectl & MEMMODE_RCLK_GATE)); 1409 seq_printf(m, "Starting frequency: P%d\n", 1410 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1411 seq_printf(m, "Max P-state: P%d\n", 1412 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1413 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1414 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1415 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1416 seq_printf(m, "Render standby enabled: %s\n", 1417 yesno(!(rstdbyctl & RCX_SW_EXIT))); 1418 seq_puts(m, "Current RS state: "); 1419 switch (rstdbyctl & RSX_STATUS_MASK) { 1420 case RSX_STATUS_ON: 1421 seq_puts(m, "on\n"); 1422 break; 1423 case RSX_STATUS_RC1: 1424 seq_puts(m, "RC1\n"); 1425 break; 1426 case RSX_STATUS_RC1E: 1427 seq_puts(m, "RC1E\n"); 1428 break; 1429 case RSX_STATUS_RS1: 1430 seq_puts(m, "RS1\n"); 1431 break; 1432 case RSX_STATUS_RS2: 1433 seq_puts(m, "RS2 (RC6)\n"); 1434 break; 1435 case RSX_STATUS_RS3: 1436 seq_puts(m, "RC3 (RC6+)\n"); 1437 break; 1438 default: 1439 seq_puts(m, "unknown\n"); 1440 break; 1441 } 1442 1443 return 0; 1444 } 1445 1446 static int i915_forcewake_domains(struct seq_file *m, void *data) 1447 { 1448 struct drm_info_node *node = m->private; 1449 struct drm_device *dev = node->minor->dev; 1450 struct drm_i915_private *dev_priv = dev->dev_private; 1451 struct intel_uncore_forcewake_domain *fw_domain; 1452 int i; 1453 1454 spin_lock_irq(&dev_priv->uncore.lock); 1455 for_each_fw_domain(fw_domain, dev_priv, i) { 1456 seq_printf(m, "%s.wake_count = %u\n", 1457 intel_uncore_forcewake_domain_to_str(i), 1458 fw_domain->wake_count); 1459 } 1460 spin_unlock_irq(&dev_priv->uncore.lock); 1461 1462 return 0; 1463 } 1464 1465 static int vlv_drpc_info(struct seq_file *m) 1466 { 1467 struct drm_info_node *node = m->private; 1468 struct drm_device *dev = node->minor->dev; 1469 struct drm_i915_private *dev_priv = dev->dev_private; 1470 u32 rpmodectl1, rcctl1, pw_status; 1471 1472 intel_runtime_pm_get(dev_priv); 1473 1474 pw_status = I915_READ(VLV_GTLC_PW_STATUS); 1475 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1476 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1477 1478 intel_runtime_pm_put(dev_priv); 1479 1480 seq_printf(m, "Video Turbo Mode: %s\n", 1481 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1482 seq_printf(m, "Turbo enabled: %s\n", 1483 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1484 seq_printf(m, "HW control enabled: %s\n", 1485 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1486 seq_printf(m, "SW control enabled: %s\n", 1487 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1488 GEN6_RP_MEDIA_SW_MODE)); 1489 seq_printf(m, "RC6 Enabled: %s\n", 1490 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | 1491 GEN6_RC_CTL_EI_MODE(1)))); 1492 seq_printf(m, "Render Power Well: %s\n", 1493 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); 1494 seq_printf(m, "Media Power Well: %s\n", 1495 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down"); 1496 1497 seq_printf(m, "Render RC6 residency since boot: %u\n", 1498 I915_READ(VLV_GT_RENDER_RC6)); 1499 seq_printf(m, "Media RC6 residency since boot: %u\n", 1500 I915_READ(VLV_GT_MEDIA_RC6)); 1501 1502 return i915_forcewake_domains(m, NULL); 1503 } 1504 1505 static int gen6_drpc_info(struct seq_file *m) 1506 { 1507 struct drm_info_node *node = m->private; 1508 struct drm_device *dev = node->minor->dev; 1509 struct drm_i915_private *dev_priv = dev->dev_private; 1510 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1511 unsigned forcewake_count; 1512 int count = 0, ret; 1513 1514 ret = mutex_lock_interruptible(&dev->struct_mutex); 1515 if (ret) 1516 return ret; 1517 intel_runtime_pm_get(dev_priv); 1518 1519 spin_lock_irq(&dev_priv->uncore.lock); 1520 forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count; 1521 spin_unlock_irq(&dev_priv->uncore.lock); 1522 1523 if (forcewake_count) { 1524 seq_puts(m, "RC information inaccurate because somebody " 1525 "holds a forcewake reference \n"); 1526 } else { 1527 /* NB: we cannot use forcewake, else we read the wrong values */ 1528 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1529 udelay(10); 1530 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1531 } 1532 1533 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS); 1534 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1535 1536 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1537 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1538 mutex_unlock(&dev->struct_mutex); 1539 mutex_lock(&dev_priv->rps.hw_lock); 1540 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1541 mutex_unlock(&dev_priv->rps.hw_lock); 1542 1543 intel_runtime_pm_put(dev_priv); 1544 1545 seq_printf(m, "Video Turbo Mode: %s\n", 1546 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1547 seq_printf(m, "HW control enabled: %s\n", 1548 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1549 seq_printf(m, "SW control enabled: %s\n", 1550 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1551 GEN6_RP_MEDIA_SW_MODE)); 1552 seq_printf(m, "RC1e Enabled: %s\n", 1553 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1554 seq_printf(m, "RC6 Enabled: %s\n", 1555 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1556 seq_printf(m, "Deep RC6 Enabled: %s\n", 1557 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1558 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1559 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1560 seq_puts(m, "Current RC state: "); 1561 switch (gt_core_status & GEN6_RCn_MASK) { 1562 case GEN6_RC0: 1563 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1564 seq_puts(m, "Core Power Down\n"); 1565 else 1566 seq_puts(m, "on\n"); 1567 break; 1568 case GEN6_RC3: 1569 seq_puts(m, "RC3\n"); 1570 break; 1571 case GEN6_RC6: 1572 seq_puts(m, "RC6\n"); 1573 break; 1574 case GEN6_RC7: 1575 seq_puts(m, "RC7\n"); 1576 break; 1577 default: 1578 seq_puts(m, "Unknown\n"); 1579 break; 1580 } 1581 1582 seq_printf(m, "Core Power Down: %s\n", 1583 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1584 1585 /* Not exactly sure what this is */ 1586 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1587 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1588 seq_printf(m, "RC6 residency since boot: %u\n", 1589 I915_READ(GEN6_GT_GFX_RC6)); 1590 seq_printf(m, "RC6+ residency since boot: %u\n", 1591 I915_READ(GEN6_GT_GFX_RC6p)); 1592 seq_printf(m, "RC6++ residency since boot: %u\n", 1593 I915_READ(GEN6_GT_GFX_RC6pp)); 1594 1595 seq_printf(m, "RC6 voltage: %dmV\n", 1596 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1597 seq_printf(m, "RC6+ voltage: %dmV\n", 1598 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1599 seq_printf(m, "RC6++ voltage: %dmV\n", 1600 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1601 return 0; 1602 } 1603 1604 static int i915_drpc_info(struct seq_file *m, void *unused) 1605 { 1606 struct drm_info_node *node = m->private; 1607 struct drm_device *dev = node->minor->dev; 1608 1609 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 1610 return vlv_drpc_info(m); 1611 else if (INTEL_INFO(dev)->gen >= 6) 1612 return gen6_drpc_info(m); 1613 else 1614 return ironlake_drpc_info(m); 1615 } 1616 1617 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) 1618 { 1619 struct drm_info_node *node = m->private; 1620 struct drm_device *dev = node->minor->dev; 1621 struct drm_i915_private *dev_priv = dev->dev_private; 1622 1623 seq_printf(m, "FB tracking busy bits: 0x%08x\n", 1624 dev_priv->fb_tracking.busy_bits); 1625 1626 seq_printf(m, "FB tracking flip bits: 0x%08x\n", 1627 dev_priv->fb_tracking.flip_bits); 1628 1629 return 0; 1630 } 1631 1632 static int i915_fbc_status(struct seq_file *m, void *unused) 1633 { 1634 struct drm_info_node *node = m->private; 1635 struct drm_device *dev = node->minor->dev; 1636 struct drm_i915_private *dev_priv = dev->dev_private; 1637 1638 if (!HAS_FBC(dev)) { 1639 seq_puts(m, "FBC unsupported on this chipset\n"); 1640 return 0; 1641 } 1642 1643 intel_runtime_pm_get(dev_priv); 1644 mutex_lock(&dev_priv->fbc.lock); 1645 1646 if (intel_fbc_is_active(dev_priv)) 1647 seq_puts(m, "FBC enabled\n"); 1648 else 1649 seq_printf(m, "FBC disabled: %s\n", 1650 dev_priv->fbc.no_fbc_reason); 1651 1652 if (INTEL_INFO(dev_priv)->gen >= 7) 1653 seq_printf(m, "Compressing: %s\n", 1654 yesno(I915_READ(FBC_STATUS2) & 1655 FBC_COMPRESSION_MASK)); 1656 1657 mutex_unlock(&dev_priv->fbc.lock); 1658 intel_runtime_pm_put(dev_priv); 1659 1660 return 0; 1661 } 1662 1663 static int i915_fbc_fc_get(void *data, u64 *val) 1664 { 1665 struct drm_device *dev = data; 1666 struct drm_i915_private *dev_priv = dev->dev_private; 1667 1668 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) 1669 return -ENODEV; 1670 1671 *val = dev_priv->fbc.false_color; 1672 1673 return 0; 1674 } 1675 1676 static int i915_fbc_fc_set(void *data, u64 val) 1677 { 1678 struct drm_device *dev = data; 1679 struct drm_i915_private *dev_priv = dev->dev_private; 1680 u32 reg; 1681 1682 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) 1683 return -ENODEV; 1684 1685 mutex_lock(&dev_priv->fbc.lock); 1686 1687 reg = I915_READ(ILK_DPFC_CONTROL); 1688 dev_priv->fbc.false_color = val; 1689 1690 I915_WRITE(ILK_DPFC_CONTROL, val ? 1691 (reg | FBC_CTL_FALSE_COLOR) : 1692 (reg & ~FBC_CTL_FALSE_COLOR)); 1693 1694 mutex_unlock(&dev_priv->fbc.lock); 1695 return 0; 1696 } 1697 1698 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops, 1699 i915_fbc_fc_get, i915_fbc_fc_set, 1700 "%llu\n"); 1701 1702 static int i915_ips_status(struct seq_file *m, void *unused) 1703 { 1704 struct drm_info_node *node = m->private; 1705 struct drm_device *dev = node->minor->dev; 1706 struct drm_i915_private *dev_priv = dev->dev_private; 1707 1708 if (!HAS_IPS(dev)) { 1709 seq_puts(m, "not supported\n"); 1710 return 0; 1711 } 1712 1713 intel_runtime_pm_get(dev_priv); 1714 1715 seq_printf(m, "Enabled by kernel parameter: %s\n", 1716 yesno(i915.enable_ips)); 1717 1718 if (INTEL_INFO(dev)->gen >= 8) { 1719 seq_puts(m, "Currently: unknown\n"); 1720 } else { 1721 if (I915_READ(IPS_CTL) & IPS_ENABLE) 1722 seq_puts(m, "Currently: enabled\n"); 1723 else 1724 seq_puts(m, "Currently: disabled\n"); 1725 } 1726 1727 intel_runtime_pm_put(dev_priv); 1728 1729 return 0; 1730 } 1731 1732 static int i915_sr_status(struct seq_file *m, void *unused) 1733 { 1734 struct drm_info_node *node = m->private; 1735 struct drm_device *dev = node->minor->dev; 1736 struct drm_i915_private *dev_priv = dev->dev_private; 1737 bool sr_enabled = false; 1738 1739 intel_runtime_pm_get(dev_priv); 1740 1741 if (HAS_PCH_SPLIT(dev)) 1742 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1743 else if (IS_CRESTLINE(dev) || IS_G4X(dev) || 1744 IS_I945G(dev) || IS_I945GM(dev)) 1745 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1746 else if (IS_I915GM(dev)) 1747 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1748 else if (IS_PINEVIEW(dev)) 1749 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1750 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 1751 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 1752 1753 intel_runtime_pm_put(dev_priv); 1754 1755 seq_printf(m, "self-refresh: %s\n", 1756 sr_enabled ? "enabled" : "disabled"); 1757 1758 return 0; 1759 } 1760 1761 static int i915_emon_status(struct seq_file *m, void *unused) 1762 { 1763 struct drm_info_node *node = m->private; 1764 struct drm_device *dev = node->minor->dev; 1765 struct drm_i915_private *dev_priv = dev->dev_private; 1766 unsigned long temp, chipset, gfx; 1767 int ret; 1768 1769 if (!IS_GEN5(dev)) 1770 return -ENODEV; 1771 1772 ret = mutex_lock_interruptible(&dev->struct_mutex); 1773 if (ret) 1774 return ret; 1775 1776 temp = i915_mch_val(dev_priv); 1777 chipset = i915_chipset_val(dev_priv); 1778 gfx = i915_gfx_val(dev_priv); 1779 mutex_unlock(&dev->struct_mutex); 1780 1781 seq_printf(m, "GMCH temp: %ld\n", temp); 1782 seq_printf(m, "Chipset power: %ld\n", chipset); 1783 seq_printf(m, "GFX power: %ld\n", gfx); 1784 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1785 1786 return 0; 1787 } 1788 1789 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1790 { 1791 struct drm_info_node *node = m->private; 1792 struct drm_device *dev = node->minor->dev; 1793 struct drm_i915_private *dev_priv = dev->dev_private; 1794 int ret = 0; 1795 int gpu_freq, ia_freq; 1796 unsigned int max_gpu_freq, min_gpu_freq; 1797 1798 if (!HAS_CORE_RING_FREQ(dev)) { 1799 seq_puts(m, "unsupported on this chipset\n"); 1800 return 0; 1801 } 1802 1803 intel_runtime_pm_get(dev_priv); 1804 1805 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1806 1807 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1808 if (ret) 1809 goto out; 1810 1811 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 1812 /* Convert GT frequency to 50 HZ units */ 1813 min_gpu_freq = 1814 dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER; 1815 max_gpu_freq = 1816 dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER; 1817 } else { 1818 min_gpu_freq = dev_priv->rps.min_freq_softlimit; 1819 max_gpu_freq = dev_priv->rps.max_freq_softlimit; 1820 } 1821 1822 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1823 1824 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { 1825 ia_freq = gpu_freq; 1826 sandybridge_pcode_read(dev_priv, 1827 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1828 &ia_freq); 1829 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1830 intel_gpu_freq(dev_priv, (gpu_freq * 1831 (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? 1832 GEN9_FREQ_SCALER : 1))), 1833 ((ia_freq >> 0) & 0xff) * 100, 1834 ((ia_freq >> 8) & 0xff) * 100); 1835 } 1836 1837 mutex_unlock(&dev_priv->rps.hw_lock); 1838 1839 out: 1840 intel_runtime_pm_put(dev_priv); 1841 return ret; 1842 } 1843 1844 static int i915_opregion(struct seq_file *m, void *unused) 1845 { 1846 struct drm_info_node *node = m->private; 1847 struct drm_device *dev = node->minor->dev; 1848 struct drm_i915_private *dev_priv = dev->dev_private; 1849 struct intel_opregion *opregion = &dev_priv->opregion; 1850 int ret; 1851 1852 ret = mutex_lock_interruptible(&dev->struct_mutex); 1853 if (ret) 1854 goto out; 1855 1856 if (opregion->header) 1857 seq_write(m, opregion->header, OPREGION_SIZE); 1858 1859 mutex_unlock(&dev->struct_mutex); 1860 1861 out: 1862 return 0; 1863 } 1864 1865 static int i915_vbt(struct seq_file *m, void *unused) 1866 { 1867 struct drm_info_node *node = m->private; 1868 struct drm_device *dev = node->minor->dev; 1869 struct drm_i915_private *dev_priv = dev->dev_private; 1870 struct intel_opregion *opregion = &dev_priv->opregion; 1871 1872 if (opregion->vbt) 1873 seq_write(m, opregion->vbt, opregion->vbt_size); 1874 1875 return 0; 1876 } 1877 1878 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1879 { 1880 struct drm_info_node *node = m->private; 1881 struct drm_device *dev = node->minor->dev; 1882 struct intel_framebuffer *fbdev_fb = NULL; 1883 struct drm_framebuffer *drm_fb; 1884 1885 #ifdef CONFIG_DRM_FBDEV_EMULATION 1886 if (to_i915(dev)->fbdev) { 1887 fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb); 1888 1889 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1890 fbdev_fb->base.width, 1891 fbdev_fb->base.height, 1892 fbdev_fb->base.depth, 1893 fbdev_fb->base.bits_per_pixel, 1894 fbdev_fb->base.modifier[0], 1895 atomic_read(&fbdev_fb->base.refcount.refcount)); 1896 describe_obj(m, fbdev_fb->obj); 1897 seq_putc(m, '\n'); 1898 } 1899 #endif 1900 1901 mutex_lock(&dev->mode_config.fb_lock); 1902 drm_for_each_fb(drm_fb, dev) { 1903 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 1904 if (fb == fbdev_fb) 1905 continue; 1906 1907 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1908 fb->base.width, 1909 fb->base.height, 1910 fb->base.depth, 1911 fb->base.bits_per_pixel, 1912 fb->base.modifier[0], 1913 atomic_read(&fb->base.refcount.refcount)); 1914 describe_obj(m, fb->obj); 1915 seq_putc(m, '\n'); 1916 } 1917 mutex_unlock(&dev->mode_config.fb_lock); 1918 1919 return 0; 1920 } 1921 1922 static void describe_ctx_ringbuf(struct seq_file *m, 1923 struct intel_ringbuffer *ringbuf) 1924 { 1925 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)", 1926 ringbuf->space, ringbuf->head, ringbuf->tail, 1927 ringbuf->last_retired_head); 1928 } 1929 1930 static int i915_context_status(struct seq_file *m, void *unused) 1931 { 1932 struct drm_info_node *node = m->private; 1933 struct drm_device *dev = node->minor->dev; 1934 struct drm_i915_private *dev_priv = dev->dev_private; 1935 struct intel_engine_cs *ring; 1936 struct intel_context *ctx; 1937 int ret, i; 1938 1939 ret = mutex_lock_interruptible(&dev->struct_mutex); 1940 if (ret) 1941 return ret; 1942 1943 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1944 if (!i915.enable_execlists && 1945 ctx->legacy_hw_ctx.rcs_state == NULL) 1946 continue; 1947 1948 seq_puts(m, "HW context "); 1949 describe_ctx(m, ctx); 1950 for_each_ring(ring, dev_priv, i) { 1951 if (ring->default_context == ctx) 1952 seq_printf(m, "(default context %s) ", 1953 ring->name); 1954 } 1955 1956 if (i915.enable_execlists) { 1957 seq_putc(m, '\n'); 1958 for_each_ring(ring, dev_priv, i) { 1959 struct drm_i915_gem_object *ctx_obj = 1960 ctx->engine[i].state; 1961 struct intel_ringbuffer *ringbuf = 1962 ctx->engine[i].ringbuf; 1963 1964 seq_printf(m, "%s: ", ring->name); 1965 if (ctx_obj) 1966 describe_obj(m, ctx_obj); 1967 if (ringbuf) 1968 describe_ctx_ringbuf(m, ringbuf); 1969 seq_putc(m, '\n'); 1970 } 1971 } else { 1972 describe_obj(m, ctx->legacy_hw_ctx.rcs_state); 1973 } 1974 1975 seq_putc(m, '\n'); 1976 } 1977 1978 mutex_unlock(&dev->struct_mutex); 1979 1980 return 0; 1981 } 1982 1983 static void i915_dump_lrc_obj(struct seq_file *m, 1984 struct intel_engine_cs *ring, 1985 struct drm_i915_gem_object *ctx_obj) 1986 { 1987 struct page *page; 1988 uint32_t *reg_state; 1989 int j; 1990 unsigned long ggtt_offset = 0; 1991 1992 if (ctx_obj == NULL) { 1993 seq_printf(m, "Context on %s with no gem object\n", 1994 ring->name); 1995 return; 1996 } 1997 1998 seq_printf(m, "CONTEXT: %s %u\n", ring->name, 1999 intel_execlists_ctx_id(ctx_obj)); 2000 2001 if (!i915_gem_obj_ggtt_bound(ctx_obj)) 2002 seq_puts(m, "\tNot bound in GGTT\n"); 2003 else 2004 ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj); 2005 2006 if (i915_gem_object_get_pages(ctx_obj)) { 2007 seq_puts(m, "\tFailed to get pages for context object\n"); 2008 return; 2009 } 2010 2011 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); 2012 if (!WARN_ON(page == NULL)) { 2013 reg_state = kmap_atomic(page); 2014 2015 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { 2016 seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", 2017 ggtt_offset + 4096 + (j * 4), 2018 reg_state[j], reg_state[j + 1], 2019 reg_state[j + 2], reg_state[j + 3]); 2020 } 2021 kunmap_atomic(reg_state); 2022 } 2023 2024 seq_putc(m, '\n'); 2025 } 2026 2027 static int i915_dump_lrc(struct seq_file *m, void *unused) 2028 { 2029 struct drm_info_node *node = (struct drm_info_node *) m->private; 2030 struct drm_device *dev = node->minor->dev; 2031 struct drm_i915_private *dev_priv = dev->dev_private; 2032 struct intel_engine_cs *ring; 2033 struct intel_context *ctx; 2034 int ret, i; 2035 2036 if (!i915.enable_execlists) { 2037 seq_printf(m, "Logical Ring Contexts are disabled\n"); 2038 return 0; 2039 } 2040 2041 ret = mutex_lock_interruptible(&dev->struct_mutex); 2042 if (ret) 2043 return ret; 2044 2045 list_for_each_entry(ctx, &dev_priv->context_list, link) { 2046 for_each_ring(ring, dev_priv, i) { 2047 if (ring->default_context != ctx) 2048 i915_dump_lrc_obj(m, ring, 2049 ctx->engine[i].state); 2050 } 2051 } 2052 2053 mutex_unlock(&dev->struct_mutex); 2054 2055 return 0; 2056 } 2057 2058 static int i915_execlists(struct seq_file *m, void *data) 2059 { 2060 struct drm_info_node *node = (struct drm_info_node *)m->private; 2061 struct drm_device *dev = node->minor->dev; 2062 struct drm_i915_private *dev_priv = dev->dev_private; 2063 struct intel_engine_cs *ring; 2064 u32 status_pointer; 2065 u8 read_pointer; 2066 u8 write_pointer; 2067 u32 status; 2068 u32 ctx_id; 2069 struct list_head *cursor; 2070 int ring_id, i; 2071 int ret; 2072 2073 if (!i915.enable_execlists) { 2074 seq_puts(m, "Logical Ring Contexts are disabled\n"); 2075 return 0; 2076 } 2077 2078 ret = mutex_lock_interruptible(&dev->struct_mutex); 2079 if (ret) 2080 return ret; 2081 2082 intel_runtime_pm_get(dev_priv); 2083 2084 for_each_ring(ring, dev_priv, ring_id) { 2085 struct drm_i915_gem_request *head_req = NULL; 2086 int count = 0; 2087 unsigned long flags; 2088 2089 seq_printf(m, "%s\n", ring->name); 2090 2091 status = I915_READ(RING_EXECLIST_STATUS_LO(ring)); 2092 ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring)); 2093 seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n", 2094 status, ctx_id); 2095 2096 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); 2097 seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer); 2098 2099 read_pointer = ring->next_context_status_buffer; 2100 write_pointer = status_pointer & 0x07; 2101 if (read_pointer > write_pointer) 2102 write_pointer += 6; 2103 seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n", 2104 read_pointer, write_pointer); 2105 2106 for (i = 0; i < 6; i++) { 2107 status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i)); 2108 ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i)); 2109 2110 seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n", 2111 i, status, ctx_id); 2112 } 2113 2114 spin_lock_irqsave(&ring->execlist_lock, flags); 2115 list_for_each(cursor, &ring->execlist_queue) 2116 count++; 2117 head_req = list_first_entry_or_null(&ring->execlist_queue, 2118 struct drm_i915_gem_request, execlist_link); 2119 spin_unlock_irqrestore(&ring->execlist_lock, flags); 2120 2121 seq_printf(m, "\t%d requests in queue\n", count); 2122 if (head_req) { 2123 struct drm_i915_gem_object *ctx_obj; 2124 2125 ctx_obj = head_req->ctx->engine[ring_id].state; 2126 seq_printf(m, "\tHead request id: %u\n", 2127 intel_execlists_ctx_id(ctx_obj)); 2128 seq_printf(m, "\tHead request tail: %u\n", 2129 head_req->tail); 2130 } 2131 2132 seq_putc(m, '\n'); 2133 } 2134 2135 intel_runtime_pm_put(dev_priv); 2136 mutex_unlock(&dev->struct_mutex); 2137 2138 return 0; 2139 } 2140 2141 static const char *swizzle_string(unsigned swizzle) 2142 { 2143 switch (swizzle) { 2144 case I915_BIT_6_SWIZZLE_NONE: 2145 return "none"; 2146 case I915_BIT_6_SWIZZLE_9: 2147 return "bit9"; 2148 case I915_BIT_6_SWIZZLE_9_10: 2149 return "bit9/bit10"; 2150 case I915_BIT_6_SWIZZLE_9_11: 2151 return "bit9/bit11"; 2152 case I915_BIT_6_SWIZZLE_9_10_11: 2153 return "bit9/bit10/bit11"; 2154 case I915_BIT_6_SWIZZLE_9_17: 2155 return "bit9/bit17"; 2156 case I915_BIT_6_SWIZZLE_9_10_17: 2157 return "bit9/bit10/bit17"; 2158 case I915_BIT_6_SWIZZLE_UNKNOWN: 2159 return "unknown"; 2160 } 2161 2162 return "bug"; 2163 } 2164 2165 static int i915_swizzle_info(struct seq_file *m, void *data) 2166 { 2167 struct drm_info_node *node = m->private; 2168 struct drm_device *dev = node->minor->dev; 2169 struct drm_i915_private *dev_priv = dev->dev_private; 2170 int ret; 2171 2172 ret = mutex_lock_interruptible(&dev->struct_mutex); 2173 if (ret) 2174 return ret; 2175 intel_runtime_pm_get(dev_priv); 2176 2177 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 2178 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 2179 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 2180 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 2181 2182 if (IS_GEN3(dev) || IS_GEN4(dev)) { 2183 seq_printf(m, "DDC = 0x%08x\n", 2184 I915_READ(DCC)); 2185 seq_printf(m, "DDC2 = 0x%08x\n", 2186 I915_READ(DCC2)); 2187 seq_printf(m, "C0DRB3 = 0x%04x\n", 2188 I915_READ16(C0DRB3)); 2189 seq_printf(m, "C1DRB3 = 0x%04x\n", 2190 I915_READ16(C1DRB3)); 2191 } else if (INTEL_INFO(dev)->gen >= 6) { 2192 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 2193 I915_READ(MAD_DIMM_C0)); 2194 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 2195 I915_READ(MAD_DIMM_C1)); 2196 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 2197 I915_READ(MAD_DIMM_C2)); 2198 seq_printf(m, "TILECTL = 0x%08x\n", 2199 I915_READ(TILECTL)); 2200 if (INTEL_INFO(dev)->gen >= 8) 2201 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 2202 I915_READ(GAMTARBMODE)); 2203 else 2204 seq_printf(m, "ARB_MODE = 0x%08x\n", 2205 I915_READ(ARB_MODE)); 2206 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 2207 I915_READ(DISP_ARB_CTL)); 2208 } 2209 2210 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2211 seq_puts(m, "L-shaped memory detected\n"); 2212 2213 intel_runtime_pm_put(dev_priv); 2214 mutex_unlock(&dev->struct_mutex); 2215 2216 return 0; 2217 } 2218 2219 static int per_file_ctx(int id, void *ptr, void *data) 2220 { 2221 struct intel_context *ctx = ptr; 2222 struct seq_file *m = data; 2223 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2224 2225 if (!ppgtt) { 2226 seq_printf(m, " no ppgtt for context %d\n", 2227 ctx->user_handle); 2228 return 0; 2229 } 2230 2231 if (i915_gem_context_is_default(ctx)) 2232 seq_puts(m, " default context:\n"); 2233 else 2234 seq_printf(m, " context %d:\n", ctx->user_handle); 2235 ppgtt->debug_dump(ppgtt, m); 2236 2237 return 0; 2238 } 2239 2240 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2241 { 2242 struct drm_i915_private *dev_priv = dev->dev_private; 2243 struct intel_engine_cs *ring; 2244 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2245 int unused, i; 2246 2247 if (!ppgtt) 2248 return; 2249 2250 for_each_ring(ring, dev_priv, unused) { 2251 seq_printf(m, "%s\n", ring->name); 2252 for (i = 0; i < 4; i++) { 2253 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i)); 2254 pdp <<= 32; 2255 pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i)); 2256 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2257 } 2258 } 2259 } 2260 2261 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2262 { 2263 struct drm_i915_private *dev_priv = dev->dev_private; 2264 struct intel_engine_cs *ring; 2265 int i; 2266 2267 if (INTEL_INFO(dev)->gen == 6) 2268 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2269 2270 for_each_ring(ring, dev_priv, i) { 2271 seq_printf(m, "%s\n", ring->name); 2272 if (INTEL_INFO(dev)->gen == 7) 2273 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 2274 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 2275 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 2276 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 2277 } 2278 if (dev_priv->mm.aliasing_ppgtt) { 2279 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2280 2281 seq_puts(m, "aliasing PPGTT:\n"); 2282 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); 2283 2284 ppgtt->debug_dump(ppgtt, m); 2285 } 2286 2287 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2288 } 2289 2290 static int i915_ppgtt_info(struct seq_file *m, void *data) 2291 { 2292 struct drm_info_node *node = m->private; 2293 struct drm_device *dev = node->minor->dev; 2294 struct drm_i915_private *dev_priv = dev->dev_private; 2295 struct drm_file *file; 2296 2297 int ret = mutex_lock_interruptible(&dev->struct_mutex); 2298 if (ret) 2299 return ret; 2300 intel_runtime_pm_get(dev_priv); 2301 2302 if (INTEL_INFO(dev)->gen >= 8) 2303 gen8_ppgtt_info(m, dev); 2304 else if (INTEL_INFO(dev)->gen >= 6) 2305 gen6_ppgtt_info(m, dev); 2306 2307 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2308 struct drm_i915_file_private *file_priv = file->driver_priv; 2309 struct task_struct *task; 2310 2311 task = get_pid_task(file->pid, PIDTYPE_PID); 2312 if (!task) { 2313 ret = -ESRCH; 2314 goto out_put; 2315 } 2316 seq_printf(m, "\nproc: %s\n", task->comm); 2317 put_task_struct(task); 2318 idr_for_each(&file_priv->context_idr, per_file_ctx, 2319 (void *)(unsigned long)m); 2320 } 2321 2322 out_put: 2323 intel_runtime_pm_put(dev_priv); 2324 mutex_unlock(&dev->struct_mutex); 2325 2326 return ret; 2327 } 2328 2329 static int count_irq_waiters(struct drm_i915_private *i915) 2330 { 2331 struct intel_engine_cs *ring; 2332 int count = 0; 2333 int i; 2334 2335 for_each_ring(ring, i915, i) 2336 count += ring->irq_refcount; 2337 2338 return count; 2339 } 2340 2341 static int i915_rps_boost_info(struct seq_file *m, void *data) 2342 { 2343 struct drm_info_node *node = m->private; 2344 struct drm_device *dev = node->minor->dev; 2345 struct drm_i915_private *dev_priv = dev->dev_private; 2346 struct drm_file *file; 2347 2348 seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled); 2349 seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy); 2350 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); 2351 seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2352 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 2353 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 2354 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit), 2355 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit), 2356 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 2357 spin_lock(&dev_priv->rps.client_lock); 2358 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2359 struct drm_i915_file_private *file_priv = file->driver_priv; 2360 struct task_struct *task; 2361 2362 rcu_read_lock(); 2363 task = pid_task(file->pid, PIDTYPE_PID); 2364 seq_printf(m, "%s [%d]: %d boosts%s\n", 2365 task ? task->comm : "<unknown>", 2366 task ? task->pid : -1, 2367 file_priv->rps.boosts, 2368 list_empty(&file_priv->rps.link) ? "" : ", active"); 2369 rcu_read_unlock(); 2370 } 2371 seq_printf(m, "Semaphore boosts: %d%s\n", 2372 dev_priv->rps.semaphores.boosts, 2373 list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active"); 2374 seq_printf(m, "MMIO flip boosts: %d%s\n", 2375 dev_priv->rps.mmioflips.boosts, 2376 list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active"); 2377 seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts); 2378 spin_unlock(&dev_priv->rps.client_lock); 2379 2380 return 0; 2381 } 2382 2383 static int i915_llc(struct seq_file *m, void *data) 2384 { 2385 struct drm_info_node *node = m->private; 2386 struct drm_device *dev = node->minor->dev; 2387 struct drm_i915_private *dev_priv = dev->dev_private; 2388 2389 /* Size calculation for LLC is a bit of a pain. Ignore for now. */ 2390 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); 2391 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); 2392 2393 return 0; 2394 } 2395 2396 static int i915_guc_load_status_info(struct seq_file *m, void *data) 2397 { 2398 struct drm_info_node *node = m->private; 2399 struct drm_i915_private *dev_priv = node->minor->dev->dev_private; 2400 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 2401 u32 tmp, i; 2402 2403 if (!HAS_GUC_UCODE(dev_priv->dev)) 2404 return 0; 2405 2406 seq_printf(m, "GuC firmware status:\n"); 2407 seq_printf(m, "\tpath: %s\n", 2408 guc_fw->guc_fw_path); 2409 seq_printf(m, "\tfetch: %s\n", 2410 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status)); 2411 seq_printf(m, "\tload: %s\n", 2412 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 2413 seq_printf(m, "\tversion wanted: %d.%d\n", 2414 guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); 2415 seq_printf(m, "\tversion found: %d.%d\n", 2416 guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found); 2417 seq_printf(m, "\theader: offset is %d; size = %d\n", 2418 guc_fw->header_offset, guc_fw->header_size); 2419 seq_printf(m, "\tuCode: offset is %d; size = %d\n", 2420 guc_fw->ucode_offset, guc_fw->ucode_size); 2421 seq_printf(m, "\tRSA: offset is %d; size = %d\n", 2422 guc_fw->rsa_offset, guc_fw->rsa_size); 2423 2424 tmp = I915_READ(GUC_STATUS); 2425 2426 seq_printf(m, "\nGuC status 0x%08x:\n", tmp); 2427 seq_printf(m, "\tBootrom status = 0x%x\n", 2428 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); 2429 seq_printf(m, "\tuKernel status = 0x%x\n", 2430 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); 2431 seq_printf(m, "\tMIA Core status = 0x%x\n", 2432 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); 2433 seq_puts(m, "\nScratch registers:\n"); 2434 for (i = 0; i < 16; i++) 2435 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); 2436 2437 return 0; 2438 } 2439 2440 static void i915_guc_client_info(struct seq_file *m, 2441 struct drm_i915_private *dev_priv, 2442 struct i915_guc_client *client) 2443 { 2444 struct intel_engine_cs *ring; 2445 uint64_t tot = 0; 2446 uint32_t i; 2447 2448 seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n", 2449 client->priority, client->ctx_index, client->proc_desc_offset); 2450 seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n", 2451 client->doorbell_id, client->doorbell_offset, client->cookie); 2452 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", 2453 client->wq_size, client->wq_offset, client->wq_tail); 2454 2455 seq_printf(m, "\tFailed to queue: %u\n", client->q_fail); 2456 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); 2457 seq_printf(m, "\tLast submission result: %d\n", client->retcode); 2458 2459 for_each_ring(ring, dev_priv, i) { 2460 seq_printf(m, "\tSubmissions: %llu %s\n", 2461 client->submissions[i], 2462 ring->name); 2463 tot += client->submissions[i]; 2464 } 2465 seq_printf(m, "\tTotal: %llu\n", tot); 2466 } 2467 2468 static int i915_guc_info(struct seq_file *m, void *data) 2469 { 2470 struct drm_info_node *node = m->private; 2471 struct drm_device *dev = node->minor->dev; 2472 struct drm_i915_private *dev_priv = dev->dev_private; 2473 struct intel_guc guc; 2474 struct i915_guc_client client = {}; 2475 struct intel_engine_cs *ring; 2476 enum intel_ring_id i; 2477 u64 total = 0; 2478 2479 if (!HAS_GUC_SCHED(dev_priv->dev)) 2480 return 0; 2481 2482 if (mutex_lock_interruptible(&dev->struct_mutex)) 2483 return 0; 2484 2485 /* Take a local copy of the GuC data, so we can dump it at leisure */ 2486 guc = dev_priv->guc; 2487 if (guc.execbuf_client) 2488 client = *guc.execbuf_client; 2489 2490 mutex_unlock(&dev->struct_mutex); 2491 2492 seq_printf(m, "GuC total action count: %llu\n", guc.action_count); 2493 seq_printf(m, "GuC action failure count: %u\n", guc.action_fail); 2494 seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd); 2495 seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status); 2496 seq_printf(m, "GuC last action error code: %d\n", guc.action_err); 2497 2498 seq_printf(m, "\nGuC submissions:\n"); 2499 for_each_ring(ring, dev_priv, i) { 2500 seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n", 2501 ring->name, guc.submissions[i], 2502 guc.last_seqno[i], guc.last_seqno[i]); 2503 total += guc.submissions[i]; 2504 } 2505 seq_printf(m, "\t%s: %llu\n", "Total", total); 2506 2507 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client); 2508 i915_guc_client_info(m, dev_priv, &client); 2509 2510 /* Add more as required ... */ 2511 2512 return 0; 2513 } 2514 2515 static int i915_guc_log_dump(struct seq_file *m, void *data) 2516 { 2517 struct drm_info_node *node = m->private; 2518 struct drm_device *dev = node->minor->dev; 2519 struct drm_i915_private *dev_priv = dev->dev_private; 2520 struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj; 2521 u32 *log; 2522 int i = 0, pg; 2523 2524 if (!log_obj) 2525 return 0; 2526 2527 for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) { 2528 log = kmap_atomic(i915_gem_object_get_page(log_obj, pg)); 2529 2530 for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4) 2531 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n", 2532 *(log + i), *(log + i + 1), 2533 *(log + i + 2), *(log + i + 3)); 2534 2535 kunmap_atomic(log); 2536 } 2537 2538 seq_putc(m, '\n'); 2539 2540 return 0; 2541 } 2542 2543 static int i915_edp_psr_status(struct seq_file *m, void *data) 2544 { 2545 struct drm_info_node *node = m->private; 2546 struct drm_device *dev = node->minor->dev; 2547 struct drm_i915_private *dev_priv = dev->dev_private; 2548 u32 psrperf = 0; 2549 u32 stat[3]; 2550 enum pipe pipe; 2551 bool enabled = false; 2552 2553 if (!HAS_PSR(dev)) { 2554 seq_puts(m, "PSR not supported\n"); 2555 return 0; 2556 } 2557 2558 intel_runtime_pm_get(dev_priv); 2559 2560 mutex_lock(&dev_priv->psr.lock); 2561 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 2562 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 2563 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2564 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2565 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2566 dev_priv->psr.busy_frontbuffer_bits); 2567 seq_printf(m, "Re-enable work scheduled: %s\n", 2568 yesno(work_busy(&dev_priv->psr.work.work))); 2569 2570 if (HAS_DDI(dev)) 2571 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 2572 else { 2573 for_each_pipe(dev_priv, pipe) { 2574 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2575 VLV_EDP_PSR_CURR_STATE_MASK; 2576 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2577 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2578 enabled = true; 2579 } 2580 } 2581 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); 2582 2583 if (!HAS_DDI(dev)) 2584 for_each_pipe(dev_priv, pipe) { 2585 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2586 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2587 seq_printf(m, " pipe %c", pipe_name(pipe)); 2588 } 2589 seq_puts(m, "\n"); 2590 2591 /* 2592 * VLV/CHV PSR has no kind of performance counter 2593 * SKL+ Perf counter is reset to 0 everytime DC state is entered 2594 */ 2595 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2596 psrperf = I915_READ(EDP_PSR_PERF_CNT) & 2597 EDP_PSR_PERF_CNT_MASK; 2598 2599 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2600 } 2601 mutex_unlock(&dev_priv->psr.lock); 2602 2603 intel_runtime_pm_put(dev_priv); 2604 return 0; 2605 } 2606 2607 static int i915_sink_crc(struct seq_file *m, void *data) 2608 { 2609 struct drm_info_node *node = m->private; 2610 struct drm_device *dev = node->minor->dev; 2611 struct intel_encoder *encoder; 2612 struct intel_connector *connector; 2613 struct intel_dp *intel_dp = NULL; 2614 int ret; 2615 u8 crc[6]; 2616 2617 drm_modeset_lock_all(dev); 2618 for_each_intel_connector(dev, connector) { 2619 2620 if (connector->base.dpms != DRM_MODE_DPMS_ON) 2621 continue; 2622 2623 if (!connector->base.encoder) 2624 continue; 2625 2626 encoder = to_intel_encoder(connector->base.encoder); 2627 if (encoder->type != INTEL_OUTPUT_EDP) 2628 continue; 2629 2630 intel_dp = enc_to_intel_dp(&encoder->base); 2631 2632 ret = intel_dp_sink_crc(intel_dp, crc); 2633 if (ret) 2634 goto out; 2635 2636 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2637 crc[0], crc[1], crc[2], 2638 crc[3], crc[4], crc[5]); 2639 goto out; 2640 } 2641 ret = -ENODEV; 2642 out: 2643 drm_modeset_unlock_all(dev); 2644 return ret; 2645 } 2646 2647 static int i915_energy_uJ(struct seq_file *m, void *data) 2648 { 2649 struct drm_info_node *node = m->private; 2650 struct drm_device *dev = node->minor->dev; 2651 struct drm_i915_private *dev_priv = dev->dev_private; 2652 u64 power; 2653 u32 units; 2654 2655 if (INTEL_INFO(dev)->gen < 6) 2656 return -ENODEV; 2657 2658 intel_runtime_pm_get(dev_priv); 2659 2660 rdmsrl(MSR_RAPL_POWER_UNIT, power); 2661 power = (power & 0x1f00) >> 8; 2662 units = 1000000 / (1 << power); /* convert to uJ */ 2663 power = I915_READ(MCH_SECP_NRG_STTS); 2664 power *= units; 2665 2666 intel_runtime_pm_put(dev_priv); 2667 2668 seq_printf(m, "%llu", (long long unsigned)power); 2669 2670 return 0; 2671 } 2672 2673 static int i915_runtime_pm_status(struct seq_file *m, void *unused) 2674 { 2675 struct drm_info_node *node = m->private; 2676 struct drm_device *dev = node->minor->dev; 2677 struct drm_i915_private *dev_priv = dev->dev_private; 2678 2679 if (!HAS_RUNTIME_PM(dev)) { 2680 seq_puts(m, "not supported\n"); 2681 return 0; 2682 } 2683 2684 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); 2685 seq_printf(m, "IRQs disabled: %s\n", 2686 yesno(!intel_irqs_enabled(dev_priv))); 2687 #ifdef CONFIG_PM 2688 seq_printf(m, "Usage count: %d\n", 2689 atomic_read(&dev->dev->power.usage_count)); 2690 #else 2691 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); 2692 #endif 2693 2694 return 0; 2695 } 2696 2697 static int i915_power_domain_info(struct seq_file *m, void *unused) 2698 { 2699 struct drm_info_node *node = m->private; 2700 struct drm_device *dev = node->minor->dev; 2701 struct drm_i915_private *dev_priv = dev->dev_private; 2702 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2703 int i; 2704 2705 mutex_lock(&power_domains->lock); 2706 2707 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2708 for (i = 0; i < power_domains->power_well_count; i++) { 2709 struct i915_power_well *power_well; 2710 enum intel_display_power_domain power_domain; 2711 2712 power_well = &power_domains->power_wells[i]; 2713 seq_printf(m, "%-25s %d\n", power_well->name, 2714 power_well->count); 2715 2716 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM; 2717 power_domain++) { 2718 if (!(BIT(power_domain) & power_well->domains)) 2719 continue; 2720 2721 seq_printf(m, " %-23s %d\n", 2722 intel_display_power_domain_str(power_domain), 2723 power_domains->domain_use_count[power_domain]); 2724 } 2725 } 2726 2727 mutex_unlock(&power_domains->lock); 2728 2729 return 0; 2730 } 2731 2732 static int i915_dmc_info(struct seq_file *m, void *unused) 2733 { 2734 struct drm_info_node *node = m->private; 2735 struct drm_device *dev = node->minor->dev; 2736 struct drm_i915_private *dev_priv = dev->dev_private; 2737 struct intel_csr *csr; 2738 2739 if (!HAS_CSR(dev)) { 2740 seq_puts(m, "not supported\n"); 2741 return 0; 2742 } 2743 2744 csr = &dev_priv->csr; 2745 2746 intel_runtime_pm_get(dev_priv); 2747 2748 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); 2749 seq_printf(m, "path: %s\n", csr->fw_path); 2750 2751 if (!csr->dmc_payload) 2752 goto out; 2753 2754 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 2755 CSR_VERSION_MINOR(csr->version)); 2756 2757 if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) { 2758 seq_printf(m, "DC3 -> DC5 count: %d\n", 2759 I915_READ(SKL_CSR_DC3_DC5_COUNT)); 2760 seq_printf(m, "DC5 -> DC6 count: %d\n", 2761 I915_READ(SKL_CSR_DC5_DC6_COUNT)); 2762 } else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) { 2763 seq_printf(m, "DC3 -> DC5 count: %d\n", 2764 I915_READ(BXT_CSR_DC3_DC5_COUNT)); 2765 } 2766 2767 out: 2768 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); 2769 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); 2770 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); 2771 2772 intel_runtime_pm_put(dev_priv); 2773 2774 return 0; 2775 } 2776 2777 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2778 struct drm_display_mode *mode) 2779 { 2780 int i; 2781 2782 for (i = 0; i < tabs; i++) 2783 seq_putc(m, '\t'); 2784 2785 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2786 mode->base.id, mode->name, 2787 mode->vrefresh, mode->clock, 2788 mode->hdisplay, mode->hsync_start, 2789 mode->hsync_end, mode->htotal, 2790 mode->vdisplay, mode->vsync_start, 2791 mode->vsync_end, mode->vtotal, 2792 mode->type, mode->flags); 2793 } 2794 2795 static void intel_encoder_info(struct seq_file *m, 2796 struct intel_crtc *intel_crtc, 2797 struct intel_encoder *intel_encoder) 2798 { 2799 struct drm_info_node *node = m->private; 2800 struct drm_device *dev = node->minor->dev; 2801 struct drm_crtc *crtc = &intel_crtc->base; 2802 struct intel_connector *intel_connector; 2803 struct drm_encoder *encoder; 2804 2805 encoder = &intel_encoder->base; 2806 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2807 encoder->base.id, encoder->name); 2808 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2809 struct drm_connector *connector = &intel_connector->base; 2810 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2811 connector->base.id, 2812 connector->name, 2813 drm_get_connector_status_name(connector->status)); 2814 if (connector->status == connector_status_connected) { 2815 struct drm_display_mode *mode = &crtc->mode; 2816 seq_printf(m, ", mode:\n"); 2817 intel_seq_print_mode(m, 2, mode); 2818 } else { 2819 seq_putc(m, '\n'); 2820 } 2821 } 2822 } 2823 2824 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2825 { 2826 struct drm_info_node *node = m->private; 2827 struct drm_device *dev = node->minor->dev; 2828 struct drm_crtc *crtc = &intel_crtc->base; 2829 struct intel_encoder *intel_encoder; 2830 struct drm_plane_state *plane_state = crtc->primary->state; 2831 struct drm_framebuffer *fb = plane_state->fb; 2832 2833 if (fb) 2834 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2835 fb->base.id, plane_state->src_x >> 16, 2836 plane_state->src_y >> 16, fb->width, fb->height); 2837 else 2838 seq_puts(m, "\tprimary plane disabled\n"); 2839 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2840 intel_encoder_info(m, intel_crtc, intel_encoder); 2841 } 2842 2843 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2844 { 2845 struct drm_display_mode *mode = panel->fixed_mode; 2846 2847 seq_printf(m, "\tfixed mode:\n"); 2848 intel_seq_print_mode(m, 2, mode); 2849 } 2850 2851 static void intel_dp_info(struct seq_file *m, 2852 struct intel_connector *intel_connector) 2853 { 2854 struct intel_encoder *intel_encoder = intel_connector->encoder; 2855 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2856 2857 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2858 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 2859 if (intel_encoder->type == INTEL_OUTPUT_EDP) 2860 intel_panel_info(m, &intel_connector->panel); 2861 } 2862 2863 static void intel_dp_mst_info(struct seq_file *m, 2864 struct intel_connector *intel_connector) 2865 { 2866 struct intel_encoder *intel_encoder = intel_connector->encoder; 2867 struct intel_dp_mst_encoder *intel_mst = 2868 enc_to_mst(&intel_encoder->base); 2869 struct intel_digital_port *intel_dig_port = intel_mst->primary; 2870 struct intel_dp *intel_dp = &intel_dig_port->dp; 2871 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, 2872 intel_connector->port); 2873 2874 seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); 2875 } 2876 2877 static void intel_hdmi_info(struct seq_file *m, 2878 struct intel_connector *intel_connector) 2879 { 2880 struct intel_encoder *intel_encoder = intel_connector->encoder; 2881 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2882 2883 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); 2884 } 2885 2886 static void intel_lvds_info(struct seq_file *m, 2887 struct intel_connector *intel_connector) 2888 { 2889 intel_panel_info(m, &intel_connector->panel); 2890 } 2891 2892 static void intel_connector_info(struct seq_file *m, 2893 struct drm_connector *connector) 2894 { 2895 struct intel_connector *intel_connector = to_intel_connector(connector); 2896 struct intel_encoder *intel_encoder = intel_connector->encoder; 2897 struct drm_display_mode *mode; 2898 2899 seq_printf(m, "connector %d: type %s, status: %s\n", 2900 connector->base.id, connector->name, 2901 drm_get_connector_status_name(connector->status)); 2902 if (connector->status == connector_status_connected) { 2903 seq_printf(m, "\tname: %s\n", connector->display_info.name); 2904 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2905 connector->display_info.width_mm, 2906 connector->display_info.height_mm); 2907 seq_printf(m, "\tsubpixel order: %s\n", 2908 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2909 seq_printf(m, "\tCEA rev: %d\n", 2910 connector->display_info.cea_rev); 2911 } 2912 if (intel_encoder) { 2913 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2914 intel_encoder->type == INTEL_OUTPUT_EDP) 2915 intel_dp_info(m, intel_connector); 2916 else if (intel_encoder->type == INTEL_OUTPUT_HDMI) 2917 intel_hdmi_info(m, intel_connector); 2918 else if (intel_encoder->type == INTEL_OUTPUT_LVDS) 2919 intel_lvds_info(m, intel_connector); 2920 else if (intel_encoder->type == INTEL_OUTPUT_DP_MST) 2921 intel_dp_mst_info(m, intel_connector); 2922 } 2923 2924 seq_printf(m, "\tmodes:\n"); 2925 list_for_each_entry(mode, &connector->modes, head) 2926 intel_seq_print_mode(m, 2, mode); 2927 } 2928 2929 static bool cursor_active(struct drm_device *dev, int pipe) 2930 { 2931 struct drm_i915_private *dev_priv = dev->dev_private; 2932 u32 state; 2933 2934 if (IS_845G(dev) || IS_I865G(dev)) 2935 state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 2936 else 2937 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 2938 2939 return state; 2940 } 2941 2942 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) 2943 { 2944 struct drm_i915_private *dev_priv = dev->dev_private; 2945 u32 pos; 2946 2947 pos = I915_READ(CURPOS(pipe)); 2948 2949 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; 2950 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) 2951 *x = -*x; 2952 2953 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; 2954 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) 2955 *y = -*y; 2956 2957 return cursor_active(dev, pipe); 2958 } 2959 2960 static const char *plane_type(enum drm_plane_type type) 2961 { 2962 switch (type) { 2963 case DRM_PLANE_TYPE_OVERLAY: 2964 return "OVL"; 2965 case DRM_PLANE_TYPE_PRIMARY: 2966 return "PRI"; 2967 case DRM_PLANE_TYPE_CURSOR: 2968 return "CUR"; 2969 /* 2970 * Deliberately omitting default: to generate compiler warnings 2971 * when a new drm_plane_type gets added. 2972 */ 2973 } 2974 2975 return "unknown"; 2976 } 2977 2978 static const char *plane_rotation(unsigned int rotation) 2979 { 2980 static char buf[48]; 2981 /* 2982 * According to doc only one DRM_ROTATE_ is allowed but this 2983 * will print them all to visualize if the values are misused 2984 */ 2985 snprintf(buf, sizeof(buf), 2986 "%s%s%s%s%s%s(0x%08x)", 2987 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "", 2988 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "", 2989 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "", 2990 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "", 2991 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "", 2992 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "", 2993 rotation); 2994 2995 return buf; 2996 } 2997 2998 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2999 { 3000 struct drm_info_node *node = m->private; 3001 struct drm_device *dev = node->minor->dev; 3002 struct intel_plane *intel_plane; 3003 3004 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3005 struct drm_plane_state *state; 3006 struct drm_plane *plane = &intel_plane->base; 3007 3008 if (!plane->state) { 3009 seq_puts(m, "plane->state is NULL!\n"); 3010 continue; 3011 } 3012 3013 state = plane->state; 3014 3015 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", 3016 plane->base.id, 3017 plane_type(intel_plane->base.type), 3018 state->crtc_x, state->crtc_y, 3019 state->crtc_w, state->crtc_h, 3020 (state->src_x >> 16), 3021 ((state->src_x & 0xffff) * 15625) >> 10, 3022 (state->src_y >> 16), 3023 ((state->src_y & 0xffff) * 15625) >> 10, 3024 (state->src_w >> 16), 3025 ((state->src_w & 0xffff) * 15625) >> 10, 3026 (state->src_h >> 16), 3027 ((state->src_h & 0xffff) * 15625) >> 10, 3028 state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A", 3029 plane_rotation(state->rotation)); 3030 } 3031 } 3032 3033 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3034 { 3035 struct intel_crtc_state *pipe_config; 3036 int num_scalers = intel_crtc->num_scalers; 3037 int i; 3038 3039 pipe_config = to_intel_crtc_state(intel_crtc->base.state); 3040 3041 /* Not all platformas have a scaler */ 3042 if (num_scalers) { 3043 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 3044 num_scalers, 3045 pipe_config->scaler_state.scaler_users, 3046 pipe_config->scaler_state.scaler_id); 3047 3048 for (i = 0; i < SKL_NUM_SCALERS; i++) { 3049 struct intel_scaler *sc = 3050 &pipe_config->scaler_state.scalers[i]; 3051 3052 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 3053 i, yesno(sc->in_use), sc->mode); 3054 } 3055 seq_puts(m, "\n"); 3056 } else { 3057 seq_puts(m, "\tNo scalers available on this platform\n"); 3058 } 3059 } 3060 3061 static int i915_display_info(struct seq_file *m, void *unused) 3062 { 3063 struct drm_info_node *node = m->private; 3064 struct drm_device *dev = node->minor->dev; 3065 struct drm_i915_private *dev_priv = dev->dev_private; 3066 struct intel_crtc *crtc; 3067 struct drm_connector *connector; 3068 3069 intel_runtime_pm_get(dev_priv); 3070 drm_modeset_lock_all(dev); 3071 seq_printf(m, "CRTC info\n"); 3072 seq_printf(m, "---------\n"); 3073 for_each_intel_crtc(dev, crtc) { 3074 bool active; 3075 struct intel_crtc_state *pipe_config; 3076 int x, y; 3077 3078 pipe_config = to_intel_crtc_state(crtc->base.state); 3079 3080 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n", 3081 crtc->base.base.id, pipe_name(crtc->pipe), 3082 yesno(pipe_config->base.active), 3083 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 3084 yesno(pipe_config->dither), pipe_config->pipe_bpp); 3085 3086 if (pipe_config->base.active) { 3087 intel_crtc_info(m, crtc); 3088 3089 active = cursor_position(dev, crtc->pipe, &x, &y); 3090 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n", 3091 yesno(crtc->cursor_base), 3092 x, y, crtc->base.cursor->state->crtc_w, 3093 crtc->base.cursor->state->crtc_h, 3094 crtc->cursor_addr, yesno(active)); 3095 intel_scaler_info(m, crtc); 3096 intel_plane_info(m, crtc); 3097 } 3098 3099 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 3100 yesno(!crtc->cpu_fifo_underrun_disabled), 3101 yesno(!crtc->pch_fifo_underrun_disabled)); 3102 } 3103 3104 seq_printf(m, "\n"); 3105 seq_printf(m, "Connector info\n"); 3106 seq_printf(m, "--------------\n"); 3107 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 3108 intel_connector_info(m, connector); 3109 } 3110 drm_modeset_unlock_all(dev); 3111 intel_runtime_pm_put(dev_priv); 3112 3113 return 0; 3114 } 3115 3116 static int i915_semaphore_status(struct seq_file *m, void *unused) 3117 { 3118 struct drm_info_node *node = (struct drm_info_node *) m->private; 3119 struct drm_device *dev = node->minor->dev; 3120 struct drm_i915_private *dev_priv = dev->dev_private; 3121 struct intel_engine_cs *ring; 3122 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 3123 int i, j, ret; 3124 3125 if (!i915_semaphore_is_enabled(dev)) { 3126 seq_puts(m, "Semaphores are disabled\n"); 3127 return 0; 3128 } 3129 3130 ret = mutex_lock_interruptible(&dev->struct_mutex); 3131 if (ret) 3132 return ret; 3133 intel_runtime_pm_get(dev_priv); 3134 3135 if (IS_BROADWELL(dev)) { 3136 struct page *page; 3137 uint64_t *seqno; 3138 3139 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); 3140 3141 seqno = (uint64_t *)kmap_atomic(page); 3142 for_each_ring(ring, dev_priv, i) { 3143 uint64_t offset; 3144 3145 seq_printf(m, "%s\n", ring->name); 3146 3147 seq_puts(m, " Last signal:"); 3148 for (j = 0; j < num_rings; j++) { 3149 offset = i * I915_NUM_RINGS + j; 3150 seq_printf(m, "0x%08llx (0x%02llx) ", 3151 seqno[offset], offset * 8); 3152 } 3153 seq_putc(m, '\n'); 3154 3155 seq_puts(m, " Last wait: "); 3156 for (j = 0; j < num_rings; j++) { 3157 offset = i + (j * I915_NUM_RINGS); 3158 seq_printf(m, "0x%08llx (0x%02llx) ", 3159 seqno[offset], offset * 8); 3160 } 3161 seq_putc(m, '\n'); 3162 3163 } 3164 kunmap_atomic(seqno); 3165 } else { 3166 seq_puts(m, " Last signal:"); 3167 for_each_ring(ring, dev_priv, i) 3168 for (j = 0; j < num_rings; j++) 3169 seq_printf(m, "0x%08x\n", 3170 I915_READ(ring->semaphore.mbox.signal[j])); 3171 seq_putc(m, '\n'); 3172 } 3173 3174 seq_puts(m, "\nSync seqno:\n"); 3175 for_each_ring(ring, dev_priv, i) { 3176 for (j = 0; j < num_rings; j++) { 3177 seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]); 3178 } 3179 seq_putc(m, '\n'); 3180 } 3181 seq_putc(m, '\n'); 3182 3183 intel_runtime_pm_put(dev_priv); 3184 mutex_unlock(&dev->struct_mutex); 3185 return 0; 3186 } 3187 3188 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 3189 { 3190 struct drm_info_node *node = (struct drm_info_node *) m->private; 3191 struct drm_device *dev = node->minor->dev; 3192 struct drm_i915_private *dev_priv = dev->dev_private; 3193 int i; 3194 3195 drm_modeset_lock_all(dev); 3196 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3197 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 3198 3199 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 3200 seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n", 3201 pll->config.crtc_mask, pll->active, yesno(pll->on)); 3202 seq_printf(m, " tracked hardware state:\n"); 3203 seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll); 3204 seq_printf(m, " dpll_md: 0x%08x\n", 3205 pll->config.hw_state.dpll_md); 3206 seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0); 3207 seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1); 3208 seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll); 3209 } 3210 drm_modeset_unlock_all(dev); 3211 3212 return 0; 3213 } 3214 3215 static int i915_wa_registers(struct seq_file *m, void *unused) 3216 { 3217 int i; 3218 int ret; 3219 struct drm_info_node *node = (struct drm_info_node *) m->private; 3220 struct drm_device *dev = node->minor->dev; 3221 struct drm_i915_private *dev_priv = dev->dev_private; 3222 3223 ret = mutex_lock_interruptible(&dev->struct_mutex); 3224 if (ret) 3225 return ret; 3226 3227 intel_runtime_pm_get(dev_priv); 3228 3229 seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); 3230 for (i = 0; i < dev_priv->workarounds.count; ++i) { 3231 i915_reg_t addr; 3232 u32 mask, value, read; 3233 bool ok; 3234 3235 addr = dev_priv->workarounds.reg[i].addr; 3236 mask = dev_priv->workarounds.reg[i].mask; 3237 value = dev_priv->workarounds.reg[i].value; 3238 read = I915_READ(addr); 3239 ok = (value & mask) == (read & mask); 3240 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 3241 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL"); 3242 } 3243 3244 intel_runtime_pm_put(dev_priv); 3245 mutex_unlock(&dev->struct_mutex); 3246 3247 return 0; 3248 } 3249 3250 static int i915_ddb_info(struct seq_file *m, void *unused) 3251 { 3252 struct drm_info_node *node = m->private; 3253 struct drm_device *dev = node->minor->dev; 3254 struct drm_i915_private *dev_priv = dev->dev_private; 3255 struct skl_ddb_allocation *ddb; 3256 struct skl_ddb_entry *entry; 3257 enum pipe pipe; 3258 int plane; 3259 3260 if (INTEL_INFO(dev)->gen < 9) 3261 return 0; 3262 3263 drm_modeset_lock_all(dev); 3264 3265 ddb = &dev_priv->wm.skl_hw.ddb; 3266 3267 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 3268 3269 for_each_pipe(dev_priv, pipe) { 3270 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 3271 3272 for_each_plane(dev_priv, pipe, plane) { 3273 entry = &ddb->plane[pipe][plane]; 3274 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, 3275 entry->start, entry->end, 3276 skl_ddb_entry_size(entry)); 3277 } 3278 3279 entry = &ddb->plane[pipe][PLANE_CURSOR]; 3280 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3281 entry->end, skl_ddb_entry_size(entry)); 3282 } 3283 3284 drm_modeset_unlock_all(dev); 3285 3286 return 0; 3287 } 3288 3289 static void drrs_status_per_crtc(struct seq_file *m, 3290 struct drm_device *dev, struct intel_crtc *intel_crtc) 3291 { 3292 struct intel_encoder *intel_encoder; 3293 struct drm_i915_private *dev_priv = dev->dev_private; 3294 struct i915_drrs *drrs = &dev_priv->drrs; 3295 int vrefresh = 0; 3296 3297 for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) { 3298 /* Encoder connected on this CRTC */ 3299 switch (intel_encoder->type) { 3300 case INTEL_OUTPUT_EDP: 3301 seq_puts(m, "eDP:\n"); 3302 break; 3303 case INTEL_OUTPUT_DSI: 3304 seq_puts(m, "DSI:\n"); 3305 break; 3306 case INTEL_OUTPUT_HDMI: 3307 seq_puts(m, "HDMI:\n"); 3308 break; 3309 case INTEL_OUTPUT_DISPLAYPORT: 3310 seq_puts(m, "DP:\n"); 3311 break; 3312 default: 3313 seq_printf(m, "Other encoder (id=%d).\n", 3314 intel_encoder->type); 3315 return; 3316 } 3317 } 3318 3319 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) 3320 seq_puts(m, "\tVBT: DRRS_type: Static"); 3321 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) 3322 seq_puts(m, "\tVBT: DRRS_type: Seamless"); 3323 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED) 3324 seq_puts(m, "\tVBT: DRRS_type: None"); 3325 else 3326 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); 3327 3328 seq_puts(m, "\n\n"); 3329 3330 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { 3331 struct intel_panel *panel; 3332 3333 mutex_lock(&drrs->mutex); 3334 /* DRRS Supported */ 3335 seq_puts(m, "\tDRRS Supported: Yes\n"); 3336 3337 /* disable_drrs() will make drrs->dp NULL */ 3338 if (!drrs->dp) { 3339 seq_puts(m, "Idleness DRRS: Disabled"); 3340 mutex_unlock(&drrs->mutex); 3341 return; 3342 } 3343 3344 panel = &drrs->dp->attached_connector->panel; 3345 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", 3346 drrs->busy_frontbuffer_bits); 3347 3348 seq_puts(m, "\n\t\t"); 3349 if (drrs->refresh_rate_type == DRRS_HIGH_RR) { 3350 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); 3351 vrefresh = panel->fixed_mode->vrefresh; 3352 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { 3353 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); 3354 vrefresh = panel->downclock_mode->vrefresh; 3355 } else { 3356 seq_printf(m, "DRRS_State: Unknown(%d)\n", 3357 drrs->refresh_rate_type); 3358 mutex_unlock(&drrs->mutex); 3359 return; 3360 } 3361 seq_printf(m, "\t\tVrefresh: %d", vrefresh); 3362 3363 seq_puts(m, "\n\t\t"); 3364 mutex_unlock(&drrs->mutex); 3365 } else { 3366 /* DRRS not supported. Print the VBT parameter*/ 3367 seq_puts(m, "\tDRRS Supported : No"); 3368 } 3369 seq_puts(m, "\n"); 3370 } 3371 3372 static int i915_drrs_status(struct seq_file *m, void *unused) 3373 { 3374 struct drm_info_node *node = m->private; 3375 struct drm_device *dev = node->minor->dev; 3376 struct intel_crtc *intel_crtc; 3377 int active_crtc_cnt = 0; 3378 3379 for_each_intel_crtc(dev, intel_crtc) { 3380 drm_modeset_lock(&intel_crtc->base.mutex, NULL); 3381 3382 if (intel_crtc->base.state->active) { 3383 active_crtc_cnt++; 3384 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); 3385 3386 drrs_status_per_crtc(m, dev, intel_crtc); 3387 } 3388 3389 drm_modeset_unlock(&intel_crtc->base.mutex); 3390 } 3391 3392 if (!active_crtc_cnt) 3393 seq_puts(m, "No active crtc found\n"); 3394 3395 return 0; 3396 } 3397 3398 struct pipe_crc_info { 3399 const char *name; 3400 struct drm_device *dev; 3401 enum pipe pipe; 3402 }; 3403 3404 static int i915_dp_mst_info(struct seq_file *m, void *unused) 3405 { 3406 struct drm_info_node *node = (struct drm_info_node *) m->private; 3407 struct drm_device *dev = node->minor->dev; 3408 struct drm_encoder *encoder; 3409 struct intel_encoder *intel_encoder; 3410 struct intel_digital_port *intel_dig_port; 3411 drm_modeset_lock_all(dev); 3412 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 3413 intel_encoder = to_intel_encoder(encoder); 3414 if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT) 3415 continue; 3416 intel_dig_port = enc_to_dig_port(encoder); 3417 if (!intel_dig_port->dp.can_mst) 3418 continue; 3419 3420 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 3421 } 3422 drm_modeset_unlock_all(dev); 3423 return 0; 3424 } 3425 3426 static int i915_pipe_crc_open(struct inode *inode, struct file *filep) 3427 { 3428 struct pipe_crc_info *info = inode->i_private; 3429 struct drm_i915_private *dev_priv = info->dev->dev_private; 3430 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3431 3432 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) 3433 return -ENODEV; 3434 3435 spin_lock_irq(&pipe_crc->lock); 3436 3437 if (pipe_crc->opened) { 3438 spin_unlock_irq(&pipe_crc->lock); 3439 return -EBUSY; /* already open */ 3440 } 3441 3442 pipe_crc->opened = true; 3443 filep->private_data = inode->i_private; 3444 3445 spin_unlock_irq(&pipe_crc->lock); 3446 3447 return 0; 3448 } 3449 3450 static int i915_pipe_crc_release(struct inode *inode, struct file *filep) 3451 { 3452 struct pipe_crc_info *info = inode->i_private; 3453 struct drm_i915_private *dev_priv = info->dev->dev_private; 3454 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3455 3456 spin_lock_irq(&pipe_crc->lock); 3457 pipe_crc->opened = false; 3458 spin_unlock_irq(&pipe_crc->lock); 3459 3460 return 0; 3461 } 3462 3463 /* (6 fields, 8 chars each, space separated (5) + '\n') */ 3464 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) 3465 /* account for \'0' */ 3466 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) 3467 3468 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) 3469 { 3470 assert_spin_locked(&pipe_crc->lock); 3471 return CIRC_CNT(pipe_crc->head, pipe_crc->tail, 3472 INTEL_PIPE_CRC_ENTRIES_NR); 3473 } 3474 3475 static ssize_t 3476 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, 3477 loff_t *pos) 3478 { 3479 struct pipe_crc_info *info = filep->private_data; 3480 struct drm_device *dev = info->dev; 3481 struct drm_i915_private *dev_priv = dev->dev_private; 3482 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3483 char buf[PIPE_CRC_BUFFER_LEN]; 3484 int n_entries; 3485 ssize_t bytes_read; 3486 3487 /* 3488 * Don't allow user space to provide buffers not big enough to hold 3489 * a line of data. 3490 */ 3491 if (count < PIPE_CRC_LINE_LEN) 3492 return -EINVAL; 3493 3494 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) 3495 return 0; 3496 3497 /* nothing to read */ 3498 spin_lock_irq(&pipe_crc->lock); 3499 while (pipe_crc_data_count(pipe_crc) == 0) { 3500 int ret; 3501 3502 if (filep->f_flags & O_NONBLOCK) { 3503 spin_unlock_irq(&pipe_crc->lock); 3504 return -EAGAIN; 3505 } 3506 3507 ret = wait_event_interruptible_lock_irq(pipe_crc->wq, 3508 pipe_crc_data_count(pipe_crc), pipe_crc->lock); 3509 if (ret) { 3510 spin_unlock_irq(&pipe_crc->lock); 3511 return ret; 3512 } 3513 } 3514 3515 /* We now have one or more entries to read */ 3516 n_entries = count / PIPE_CRC_LINE_LEN; 3517 3518 bytes_read = 0; 3519 while (n_entries > 0) { 3520 struct intel_pipe_crc_entry *entry = 3521 &pipe_crc->entries[pipe_crc->tail]; 3522 int ret; 3523 3524 if (CIRC_CNT(pipe_crc->head, pipe_crc->tail, 3525 INTEL_PIPE_CRC_ENTRIES_NR) < 1) 3526 break; 3527 3528 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); 3529 pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 3530 3531 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, 3532 "%8u %8x %8x %8x %8x %8x\n", 3533 entry->frame, entry->crc[0], 3534 entry->crc[1], entry->crc[2], 3535 entry->crc[3], entry->crc[4]); 3536 3537 spin_unlock_irq(&pipe_crc->lock); 3538 3539 ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN); 3540 if (ret == PIPE_CRC_LINE_LEN) 3541 return -EFAULT; 3542 3543 user_buf += PIPE_CRC_LINE_LEN; 3544 n_entries--; 3545 3546 spin_lock_irq(&pipe_crc->lock); 3547 } 3548 3549 spin_unlock_irq(&pipe_crc->lock); 3550 3551 return bytes_read; 3552 } 3553 3554 static const struct file_operations i915_pipe_crc_fops = { 3555 .owner = THIS_MODULE, 3556 .open = i915_pipe_crc_open, 3557 .read = i915_pipe_crc_read, 3558 .release = i915_pipe_crc_release, 3559 }; 3560 3561 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { 3562 { 3563 .name = "i915_pipe_A_crc", 3564 .pipe = PIPE_A, 3565 }, 3566 { 3567 .name = "i915_pipe_B_crc", 3568 .pipe = PIPE_B, 3569 }, 3570 { 3571 .name = "i915_pipe_C_crc", 3572 .pipe = PIPE_C, 3573 }, 3574 }; 3575 3576 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, 3577 enum pipe pipe) 3578 { 3579 struct drm_device *dev = minor->dev; 3580 struct dentry *ent; 3581 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; 3582 3583 info->dev = dev; 3584 ent = debugfs_create_file(info->name, S_IRUGO, root, info, 3585 &i915_pipe_crc_fops); 3586 if (!ent) 3587 return -ENOMEM; 3588 3589 return drm_add_fake_info_node(minor, ent, info); 3590 } 3591 3592 static const char * const pipe_crc_sources[] = { 3593 "none", 3594 "plane1", 3595 "plane2", 3596 "pf", 3597 "pipe", 3598 "TV", 3599 "DP-B", 3600 "DP-C", 3601 "DP-D", 3602 "auto", 3603 }; 3604 3605 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) 3606 { 3607 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); 3608 return pipe_crc_sources[source]; 3609 } 3610 3611 static int display_crc_ctl_show(struct seq_file *m, void *data) 3612 { 3613 struct drm_device *dev = m->private; 3614 struct drm_i915_private *dev_priv = dev->dev_private; 3615 int i; 3616 3617 for (i = 0; i < I915_MAX_PIPES; i++) 3618 seq_printf(m, "%c %s\n", pipe_name(i), 3619 pipe_crc_source_name(dev_priv->pipe_crc[i].source)); 3620 3621 return 0; 3622 } 3623 3624 static int display_crc_ctl_open(struct inode *inode, struct file *file) 3625 { 3626 struct drm_device *dev = inode->i_private; 3627 3628 return single_open(file, display_crc_ctl_show, dev); 3629 } 3630 3631 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 3632 uint32_t *val) 3633 { 3634 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3635 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3636 3637 switch (*source) { 3638 case INTEL_PIPE_CRC_SOURCE_PIPE: 3639 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; 3640 break; 3641 case INTEL_PIPE_CRC_SOURCE_NONE: 3642 *val = 0; 3643 break; 3644 default: 3645 return -EINVAL; 3646 } 3647 3648 return 0; 3649 } 3650 3651 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, 3652 enum intel_pipe_crc_source *source) 3653 { 3654 struct intel_encoder *encoder; 3655 struct intel_crtc *crtc; 3656 struct intel_digital_port *dig_port; 3657 int ret = 0; 3658 3659 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3660 3661 drm_modeset_lock_all(dev); 3662 for_each_intel_encoder(dev, encoder) { 3663 if (!encoder->base.crtc) 3664 continue; 3665 3666 crtc = to_intel_crtc(encoder->base.crtc); 3667 3668 if (crtc->pipe != pipe) 3669 continue; 3670 3671 switch (encoder->type) { 3672 case INTEL_OUTPUT_TVOUT: 3673 *source = INTEL_PIPE_CRC_SOURCE_TV; 3674 break; 3675 case INTEL_OUTPUT_DISPLAYPORT: 3676 case INTEL_OUTPUT_EDP: 3677 dig_port = enc_to_dig_port(&encoder->base); 3678 switch (dig_port->port) { 3679 case PORT_B: 3680 *source = INTEL_PIPE_CRC_SOURCE_DP_B; 3681 break; 3682 case PORT_C: 3683 *source = INTEL_PIPE_CRC_SOURCE_DP_C; 3684 break; 3685 case PORT_D: 3686 *source = INTEL_PIPE_CRC_SOURCE_DP_D; 3687 break; 3688 default: 3689 WARN(1, "nonexisting DP port %c\n", 3690 port_name(dig_port->port)); 3691 break; 3692 } 3693 break; 3694 default: 3695 break; 3696 } 3697 } 3698 drm_modeset_unlock_all(dev); 3699 3700 return ret; 3701 } 3702 3703 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, 3704 enum pipe pipe, 3705 enum intel_pipe_crc_source *source, 3706 uint32_t *val) 3707 { 3708 struct drm_i915_private *dev_priv = dev->dev_private; 3709 bool need_stable_symbols = false; 3710 3711 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3712 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 3713 if (ret) 3714 return ret; 3715 } 3716 3717 switch (*source) { 3718 case INTEL_PIPE_CRC_SOURCE_PIPE: 3719 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; 3720 break; 3721 case INTEL_PIPE_CRC_SOURCE_DP_B: 3722 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; 3723 need_stable_symbols = true; 3724 break; 3725 case INTEL_PIPE_CRC_SOURCE_DP_C: 3726 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; 3727 need_stable_symbols = true; 3728 break; 3729 case INTEL_PIPE_CRC_SOURCE_DP_D: 3730 if (!IS_CHERRYVIEW(dev)) 3731 return -EINVAL; 3732 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV; 3733 need_stable_symbols = true; 3734 break; 3735 case INTEL_PIPE_CRC_SOURCE_NONE: 3736 *val = 0; 3737 break; 3738 default: 3739 return -EINVAL; 3740 } 3741 3742 /* 3743 * When the pipe CRC tap point is after the transcoders we need 3744 * to tweak symbol-level features to produce a deterministic series of 3745 * symbols for a given frame. We need to reset those features only once 3746 * a frame (instead of every nth symbol): 3747 * - DC-balance: used to ensure a better clock recovery from the data 3748 * link (SDVO) 3749 * - DisplayPort scrambling: used for EMI reduction 3750 */ 3751 if (need_stable_symbols) { 3752 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3753 3754 tmp |= DC_BALANCE_RESET_VLV; 3755 switch (pipe) { 3756 case PIPE_A: 3757 tmp |= PIPE_A_SCRAMBLE_RESET; 3758 break; 3759 case PIPE_B: 3760 tmp |= PIPE_B_SCRAMBLE_RESET; 3761 break; 3762 case PIPE_C: 3763 tmp |= PIPE_C_SCRAMBLE_RESET; 3764 break; 3765 default: 3766 return -EINVAL; 3767 } 3768 I915_WRITE(PORT_DFT2_G4X, tmp); 3769 } 3770 3771 return 0; 3772 } 3773 3774 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, 3775 enum pipe pipe, 3776 enum intel_pipe_crc_source *source, 3777 uint32_t *val) 3778 { 3779 struct drm_i915_private *dev_priv = dev->dev_private; 3780 bool need_stable_symbols = false; 3781 3782 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3783 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 3784 if (ret) 3785 return ret; 3786 } 3787 3788 switch (*source) { 3789 case INTEL_PIPE_CRC_SOURCE_PIPE: 3790 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; 3791 break; 3792 case INTEL_PIPE_CRC_SOURCE_TV: 3793 if (!SUPPORTS_TV(dev)) 3794 return -EINVAL; 3795 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; 3796 break; 3797 case INTEL_PIPE_CRC_SOURCE_DP_B: 3798 if (!IS_G4X(dev)) 3799 return -EINVAL; 3800 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; 3801 need_stable_symbols = true; 3802 break; 3803 case INTEL_PIPE_CRC_SOURCE_DP_C: 3804 if (!IS_G4X(dev)) 3805 return -EINVAL; 3806 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; 3807 need_stable_symbols = true; 3808 break; 3809 case INTEL_PIPE_CRC_SOURCE_DP_D: 3810 if (!IS_G4X(dev)) 3811 return -EINVAL; 3812 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; 3813 need_stable_symbols = true; 3814 break; 3815 case INTEL_PIPE_CRC_SOURCE_NONE: 3816 *val = 0; 3817 break; 3818 default: 3819 return -EINVAL; 3820 } 3821 3822 /* 3823 * When the pipe CRC tap point is after the transcoders we need 3824 * to tweak symbol-level features to produce a deterministic series of 3825 * symbols for a given frame. We need to reset those features only once 3826 * a frame (instead of every nth symbol): 3827 * - DC-balance: used to ensure a better clock recovery from the data 3828 * link (SDVO) 3829 * - DisplayPort scrambling: used for EMI reduction 3830 */ 3831 if (need_stable_symbols) { 3832 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3833 3834 WARN_ON(!IS_G4X(dev)); 3835 3836 I915_WRITE(PORT_DFT_I9XX, 3837 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); 3838 3839 if (pipe == PIPE_A) 3840 tmp |= PIPE_A_SCRAMBLE_RESET; 3841 else 3842 tmp |= PIPE_B_SCRAMBLE_RESET; 3843 3844 I915_WRITE(PORT_DFT2_G4X, tmp); 3845 } 3846 3847 return 0; 3848 } 3849 3850 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, 3851 enum pipe pipe) 3852 { 3853 struct drm_i915_private *dev_priv = dev->dev_private; 3854 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3855 3856 switch (pipe) { 3857 case PIPE_A: 3858 tmp &= ~PIPE_A_SCRAMBLE_RESET; 3859 break; 3860 case PIPE_B: 3861 tmp &= ~PIPE_B_SCRAMBLE_RESET; 3862 break; 3863 case PIPE_C: 3864 tmp &= ~PIPE_C_SCRAMBLE_RESET; 3865 break; 3866 default: 3867 return; 3868 } 3869 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) 3870 tmp &= ~DC_BALANCE_RESET_VLV; 3871 I915_WRITE(PORT_DFT2_G4X, tmp); 3872 3873 } 3874 3875 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, 3876 enum pipe pipe) 3877 { 3878 struct drm_i915_private *dev_priv = dev->dev_private; 3879 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3880 3881 if (pipe == PIPE_A) 3882 tmp &= ~PIPE_A_SCRAMBLE_RESET; 3883 else 3884 tmp &= ~PIPE_B_SCRAMBLE_RESET; 3885 I915_WRITE(PORT_DFT2_G4X, tmp); 3886 3887 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { 3888 I915_WRITE(PORT_DFT_I9XX, 3889 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); 3890 } 3891 } 3892 3893 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 3894 uint32_t *val) 3895 { 3896 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3897 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3898 3899 switch (*source) { 3900 case INTEL_PIPE_CRC_SOURCE_PLANE1: 3901 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; 3902 break; 3903 case INTEL_PIPE_CRC_SOURCE_PLANE2: 3904 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; 3905 break; 3906 case INTEL_PIPE_CRC_SOURCE_PIPE: 3907 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; 3908 break; 3909 case INTEL_PIPE_CRC_SOURCE_NONE: 3910 *val = 0; 3911 break; 3912 default: 3913 return -EINVAL; 3914 } 3915 3916 return 0; 3917 } 3918 3919 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable) 3920 { 3921 struct drm_i915_private *dev_priv = dev->dev_private; 3922 struct intel_crtc *crtc = 3923 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); 3924 struct intel_crtc_state *pipe_config; 3925 struct drm_atomic_state *state; 3926 int ret = 0; 3927 3928 drm_modeset_lock_all(dev); 3929 state = drm_atomic_state_alloc(dev); 3930 if (!state) { 3931 ret = -ENOMEM; 3932 goto out; 3933 } 3934 3935 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base); 3936 pipe_config = intel_atomic_get_crtc_state(state, crtc); 3937 if (IS_ERR(pipe_config)) { 3938 ret = PTR_ERR(pipe_config); 3939 goto out; 3940 } 3941 3942 pipe_config->pch_pfit.force_thru = enable; 3943 if (pipe_config->cpu_transcoder == TRANSCODER_EDP && 3944 pipe_config->pch_pfit.enabled != enable) 3945 pipe_config->base.connectors_changed = true; 3946 3947 ret = drm_atomic_commit(state); 3948 out: 3949 drm_modeset_unlock_all(dev); 3950 WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret); 3951 if (ret) 3952 drm_atomic_state_free(state); 3953 } 3954 3955 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev, 3956 enum pipe pipe, 3957 enum intel_pipe_crc_source *source, 3958 uint32_t *val) 3959 { 3960 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3961 *source = INTEL_PIPE_CRC_SOURCE_PF; 3962 3963 switch (*source) { 3964 case INTEL_PIPE_CRC_SOURCE_PLANE1: 3965 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; 3966 break; 3967 case INTEL_PIPE_CRC_SOURCE_PLANE2: 3968 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; 3969 break; 3970 case INTEL_PIPE_CRC_SOURCE_PF: 3971 if (IS_HASWELL(dev) && pipe == PIPE_A) 3972 hsw_trans_edp_pipe_A_crc_wa(dev, true); 3973 3974 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; 3975 break; 3976 case INTEL_PIPE_CRC_SOURCE_NONE: 3977 *val = 0; 3978 break; 3979 default: 3980 return -EINVAL; 3981 } 3982 3983 return 0; 3984 } 3985 3986 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, 3987 enum intel_pipe_crc_source source) 3988 { 3989 struct drm_i915_private *dev_priv = dev->dev_private; 3990 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3991 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, 3992 pipe)); 3993 enum intel_display_power_domain power_domain; 3994 u32 val = 0; /* shut up gcc */ 3995 int ret; 3996 3997 if (pipe_crc->source == source) 3998 return 0; 3999 4000 /* forbid changing the source without going back to 'none' */ 4001 if (pipe_crc->source && source) 4002 return -EINVAL; 4003 4004 power_domain = POWER_DOMAIN_PIPE(pipe); 4005 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) { 4006 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); 4007 return -EIO; 4008 } 4009 4010 if (IS_GEN2(dev)) 4011 ret = i8xx_pipe_crc_ctl_reg(&source, &val); 4012 else if (INTEL_INFO(dev)->gen < 5) 4013 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4014 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4015 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4016 else if (IS_GEN5(dev) || IS_GEN6(dev)) 4017 ret = ilk_pipe_crc_ctl_reg(&source, &val); 4018 else 4019 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4020 4021 if (ret != 0) 4022 goto out; 4023 4024 /* none -> real source transition */ 4025 if (source) { 4026 struct intel_pipe_crc_entry *entries; 4027 4028 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", 4029 pipe_name(pipe), pipe_crc_source_name(source)); 4030 4031 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, 4032 sizeof(pipe_crc->entries[0]), 4033 GFP_KERNEL); 4034 if (!entries) { 4035 ret = -ENOMEM; 4036 goto out; 4037 } 4038 4039 /* 4040 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 4041 * enabled and disabled dynamically based on package C states, 4042 * user space can't make reliable use of the CRCs, so let's just 4043 * completely disable it. 4044 */ 4045 hsw_disable_ips(crtc); 4046 4047 spin_lock_irq(&pipe_crc->lock); 4048 kfree(pipe_crc->entries); 4049 pipe_crc->entries = entries; 4050 pipe_crc->head = 0; 4051 pipe_crc->tail = 0; 4052 spin_unlock_irq(&pipe_crc->lock); 4053 } 4054 4055 pipe_crc->source = source; 4056 4057 I915_WRITE(PIPE_CRC_CTL(pipe), val); 4058 POSTING_READ(PIPE_CRC_CTL(pipe)); 4059 4060 /* real source -> none transition */ 4061 if (source == INTEL_PIPE_CRC_SOURCE_NONE) { 4062 struct intel_pipe_crc_entry *entries; 4063 struct intel_crtc *crtc = 4064 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 4065 4066 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", 4067 pipe_name(pipe)); 4068 4069 drm_modeset_lock(&crtc->base.mutex, NULL); 4070 if (crtc->base.state->active) 4071 intel_wait_for_vblank(dev, pipe); 4072 drm_modeset_unlock(&crtc->base.mutex); 4073 4074 spin_lock_irq(&pipe_crc->lock); 4075 entries = pipe_crc->entries; 4076 pipe_crc->entries = NULL; 4077 pipe_crc->head = 0; 4078 pipe_crc->tail = 0; 4079 spin_unlock_irq(&pipe_crc->lock); 4080 4081 kfree(entries); 4082 4083 if (IS_G4X(dev)) 4084 g4x_undo_pipe_scramble_reset(dev, pipe); 4085 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4086 vlv_undo_pipe_scramble_reset(dev, pipe); 4087 else if (IS_HASWELL(dev) && pipe == PIPE_A) 4088 hsw_trans_edp_pipe_A_crc_wa(dev, false); 4089 4090 hsw_enable_ips(crtc); 4091 } 4092 4093 ret = 0; 4094 4095 out: 4096 intel_display_power_put(dev_priv, power_domain); 4097 4098 return ret; 4099 } 4100 4101 /* 4102 * Parse pipe CRC command strings: 4103 * command: wsp* object wsp+ name wsp+ source wsp* 4104 * object: 'pipe' 4105 * name: (A | B | C) 4106 * source: (none | plane1 | plane2 | pf) 4107 * wsp: (#0x20 | #0x9 | #0xA)+ 4108 * 4109 * eg.: 4110 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A 4111 * "pipe A none" -> Stop CRC 4112 */ 4113 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) 4114 { 4115 int n_words = 0; 4116 4117 while (*buf) { 4118 char *end; 4119 4120 /* skip leading white space */ 4121 buf = skip_spaces(buf); 4122 if (!*buf) 4123 break; /* end of buffer */ 4124 4125 /* find end of word */ 4126 for (end = buf; *end && !isspace(*end); end++) 4127 ; 4128 4129 if (n_words == max_words) { 4130 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", 4131 max_words); 4132 return -EINVAL; /* ran out of words[] before bytes */ 4133 } 4134 4135 if (*end) 4136 *end++ = '\0'; 4137 words[n_words++] = buf; 4138 buf = end; 4139 } 4140 4141 return n_words; 4142 } 4143 4144 enum intel_pipe_crc_object { 4145 PIPE_CRC_OBJECT_PIPE, 4146 }; 4147 4148 static const char * const pipe_crc_objects[] = { 4149 "pipe", 4150 }; 4151 4152 static int 4153 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) 4154 { 4155 int i; 4156 4157 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) 4158 if (!strcmp(buf, pipe_crc_objects[i])) { 4159 *o = i; 4160 return 0; 4161 } 4162 4163 return -EINVAL; 4164 } 4165 4166 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) 4167 { 4168 const char name = buf[0]; 4169 4170 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) 4171 return -EINVAL; 4172 4173 *pipe = name - 'A'; 4174 4175 return 0; 4176 } 4177 4178 static int 4179 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) 4180 { 4181 int i; 4182 4183 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) 4184 if (!strcmp(buf, pipe_crc_sources[i])) { 4185 *s = i; 4186 return 0; 4187 } 4188 4189 return -EINVAL; 4190 } 4191 4192 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len) 4193 { 4194 #define N_WORDS 3 4195 int n_words; 4196 char *words[N_WORDS]; 4197 enum pipe pipe; 4198 enum intel_pipe_crc_object object; 4199 enum intel_pipe_crc_source source; 4200 4201 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); 4202 if (n_words != N_WORDS) { 4203 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", 4204 N_WORDS); 4205 return -EINVAL; 4206 } 4207 4208 if (display_crc_ctl_parse_object(words[0], &object) < 0) { 4209 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); 4210 return -EINVAL; 4211 } 4212 4213 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { 4214 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); 4215 return -EINVAL; 4216 } 4217 4218 if (display_crc_ctl_parse_source(words[2], &source) < 0) { 4219 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); 4220 return -EINVAL; 4221 } 4222 4223 return pipe_crc_set_source(dev, pipe, source); 4224 } 4225 4226 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, 4227 size_t len, loff_t *offp) 4228 { 4229 struct seq_file *m = file->private_data; 4230 struct drm_device *dev = m->private; 4231 char *tmpbuf; 4232 int ret; 4233 4234 if (len == 0) 4235 return 0; 4236 4237 if (len > PAGE_SIZE - 1) { 4238 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", 4239 PAGE_SIZE); 4240 return -E2BIG; 4241 } 4242 4243 tmpbuf = kmalloc(len + 1, GFP_KERNEL); 4244 if (!tmpbuf) 4245 return -ENOMEM; 4246 4247 if (copy_from_user(tmpbuf, ubuf, len)) { 4248 ret = -EFAULT; 4249 goto out; 4250 } 4251 tmpbuf[len] = '\0'; 4252 4253 ret = display_crc_ctl_parse(dev, tmpbuf, len); 4254 4255 out: 4256 kfree(tmpbuf); 4257 if (ret < 0) 4258 return ret; 4259 4260 *offp += len; 4261 return len; 4262 } 4263 4264 static const struct file_operations i915_display_crc_ctl_fops = { 4265 .owner = THIS_MODULE, 4266 .open = display_crc_ctl_open, 4267 .read = seq_read, 4268 .llseek = seq_lseek, 4269 .release = single_release, 4270 .write = display_crc_ctl_write 4271 }; 4272 4273 static ssize_t i915_displayport_test_active_write(struct file *file, 4274 const char __user *ubuf, 4275 size_t len, loff_t *offp) 4276 { 4277 char *input_buffer; 4278 int status = 0; 4279 struct drm_device *dev; 4280 struct drm_connector *connector; 4281 struct list_head *connector_list; 4282 struct intel_dp *intel_dp; 4283 int val = 0; 4284 4285 dev = ((struct seq_file *)file->private_data)->private; 4286 4287 connector_list = &dev->mode_config.connector_list; 4288 4289 if (len == 0) 4290 return 0; 4291 4292 input_buffer = kmalloc(len + 1, GFP_KERNEL); 4293 if (!input_buffer) 4294 return -ENOMEM; 4295 4296 if (copy_from_user(input_buffer, ubuf, len)) { 4297 status = -EFAULT; 4298 goto out; 4299 } 4300 4301 input_buffer[len] = '\0'; 4302 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); 4303 4304 list_for_each_entry(connector, connector_list, head) { 4305 4306 if (connector->connector_type != 4307 DRM_MODE_CONNECTOR_DisplayPort) 4308 continue; 4309 4310 if (connector->status == connector_status_connected && 4311 connector->encoder != NULL) { 4312 intel_dp = enc_to_intel_dp(connector->encoder); 4313 status = kstrtoint(input_buffer, 10, &val); 4314 if (status < 0) 4315 goto out; 4316 DRM_DEBUG_DRIVER("Got %d for test active\n", val); 4317 /* To prevent erroneous activation of the compliance 4318 * testing code, only accept an actual value of 1 here 4319 */ 4320 if (val == 1) 4321 intel_dp->compliance_test_active = 1; 4322 else 4323 intel_dp->compliance_test_active = 0; 4324 } 4325 } 4326 out: 4327 kfree(input_buffer); 4328 if (status < 0) 4329 return status; 4330 4331 *offp += len; 4332 return len; 4333 } 4334 4335 static int i915_displayport_test_active_show(struct seq_file *m, void *data) 4336 { 4337 struct drm_device *dev = m->private; 4338 struct drm_connector *connector; 4339 struct list_head *connector_list = &dev->mode_config.connector_list; 4340 struct intel_dp *intel_dp; 4341 4342 list_for_each_entry(connector, connector_list, head) { 4343 4344 if (connector->connector_type != 4345 DRM_MODE_CONNECTOR_DisplayPort) 4346 continue; 4347 4348 if (connector->status == connector_status_connected && 4349 connector->encoder != NULL) { 4350 intel_dp = enc_to_intel_dp(connector->encoder); 4351 if (intel_dp->compliance_test_active) 4352 seq_puts(m, "1"); 4353 else 4354 seq_puts(m, "0"); 4355 } else 4356 seq_puts(m, "0"); 4357 } 4358 4359 return 0; 4360 } 4361 4362 static int i915_displayport_test_active_open(struct inode *inode, 4363 struct file *file) 4364 { 4365 struct drm_device *dev = inode->i_private; 4366 4367 return single_open(file, i915_displayport_test_active_show, dev); 4368 } 4369 4370 static const struct file_operations i915_displayport_test_active_fops = { 4371 .owner = THIS_MODULE, 4372 .open = i915_displayport_test_active_open, 4373 .read = seq_read, 4374 .llseek = seq_lseek, 4375 .release = single_release, 4376 .write = i915_displayport_test_active_write 4377 }; 4378 4379 static int i915_displayport_test_data_show(struct seq_file *m, void *data) 4380 { 4381 struct drm_device *dev = m->private; 4382 struct drm_connector *connector; 4383 struct list_head *connector_list = &dev->mode_config.connector_list; 4384 struct intel_dp *intel_dp; 4385 4386 list_for_each_entry(connector, connector_list, head) { 4387 4388 if (connector->connector_type != 4389 DRM_MODE_CONNECTOR_DisplayPort) 4390 continue; 4391 4392 if (connector->status == connector_status_connected && 4393 connector->encoder != NULL) { 4394 intel_dp = enc_to_intel_dp(connector->encoder); 4395 seq_printf(m, "%lx", intel_dp->compliance_test_data); 4396 } else 4397 seq_puts(m, "0"); 4398 } 4399 4400 return 0; 4401 } 4402 static int i915_displayport_test_data_open(struct inode *inode, 4403 struct file *file) 4404 { 4405 struct drm_device *dev = inode->i_private; 4406 4407 return single_open(file, i915_displayport_test_data_show, dev); 4408 } 4409 4410 static const struct file_operations i915_displayport_test_data_fops = { 4411 .owner = THIS_MODULE, 4412 .open = i915_displayport_test_data_open, 4413 .read = seq_read, 4414 .llseek = seq_lseek, 4415 .release = single_release 4416 }; 4417 4418 static int i915_displayport_test_type_show(struct seq_file *m, void *data) 4419 { 4420 struct drm_device *dev = m->private; 4421 struct drm_connector *connector; 4422 struct list_head *connector_list = &dev->mode_config.connector_list; 4423 struct intel_dp *intel_dp; 4424 4425 list_for_each_entry(connector, connector_list, head) { 4426 4427 if (connector->connector_type != 4428 DRM_MODE_CONNECTOR_DisplayPort) 4429 continue; 4430 4431 if (connector->status == connector_status_connected && 4432 connector->encoder != NULL) { 4433 intel_dp = enc_to_intel_dp(connector->encoder); 4434 seq_printf(m, "%02lx", intel_dp->compliance_test_type); 4435 } else 4436 seq_puts(m, "0"); 4437 } 4438 4439 return 0; 4440 } 4441 4442 static int i915_displayport_test_type_open(struct inode *inode, 4443 struct file *file) 4444 { 4445 struct drm_device *dev = inode->i_private; 4446 4447 return single_open(file, i915_displayport_test_type_show, dev); 4448 } 4449 4450 static const struct file_operations i915_displayport_test_type_fops = { 4451 .owner = THIS_MODULE, 4452 .open = i915_displayport_test_type_open, 4453 .read = seq_read, 4454 .llseek = seq_lseek, 4455 .release = single_release 4456 }; 4457 4458 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) 4459 { 4460 struct drm_device *dev = m->private; 4461 int level; 4462 int num_levels; 4463 4464 if (IS_CHERRYVIEW(dev)) 4465 num_levels = 3; 4466 else if (IS_VALLEYVIEW(dev)) 4467 num_levels = 1; 4468 else 4469 num_levels = ilk_wm_max_level(dev) + 1; 4470 4471 drm_modeset_lock_all(dev); 4472 4473 for (level = 0; level < num_levels; level++) { 4474 unsigned int latency = wm[level]; 4475 4476 /* 4477 * - WM1+ latency values in 0.5us units 4478 * - latencies are in us on gen9/vlv/chv 4479 */ 4480 if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) || 4481 IS_CHERRYVIEW(dev)) 4482 latency *= 10; 4483 else if (level > 0) 4484 latency *= 5; 4485 4486 seq_printf(m, "WM%d %u (%u.%u usec)\n", 4487 level, wm[level], latency / 10, latency % 10); 4488 } 4489 4490 drm_modeset_unlock_all(dev); 4491 } 4492 4493 static int pri_wm_latency_show(struct seq_file *m, void *data) 4494 { 4495 struct drm_device *dev = m->private; 4496 struct drm_i915_private *dev_priv = dev->dev_private; 4497 const uint16_t *latencies; 4498 4499 if (INTEL_INFO(dev)->gen >= 9) 4500 latencies = dev_priv->wm.skl_latency; 4501 else 4502 latencies = to_i915(dev)->wm.pri_latency; 4503 4504 wm_latency_show(m, latencies); 4505 4506 return 0; 4507 } 4508 4509 static int spr_wm_latency_show(struct seq_file *m, void *data) 4510 { 4511 struct drm_device *dev = m->private; 4512 struct drm_i915_private *dev_priv = dev->dev_private; 4513 const uint16_t *latencies; 4514 4515 if (INTEL_INFO(dev)->gen >= 9) 4516 latencies = dev_priv->wm.skl_latency; 4517 else 4518 latencies = to_i915(dev)->wm.spr_latency; 4519 4520 wm_latency_show(m, latencies); 4521 4522 return 0; 4523 } 4524 4525 static int cur_wm_latency_show(struct seq_file *m, void *data) 4526 { 4527 struct drm_device *dev = m->private; 4528 struct drm_i915_private *dev_priv = dev->dev_private; 4529 const uint16_t *latencies; 4530 4531 if (INTEL_INFO(dev)->gen >= 9) 4532 latencies = dev_priv->wm.skl_latency; 4533 else 4534 latencies = to_i915(dev)->wm.cur_latency; 4535 4536 wm_latency_show(m, latencies); 4537 4538 return 0; 4539 } 4540 4541 static int pri_wm_latency_open(struct inode *inode, struct file *file) 4542 { 4543 struct drm_device *dev = inode->i_private; 4544 4545 if (INTEL_INFO(dev)->gen < 5) 4546 return -ENODEV; 4547 4548 return single_open(file, pri_wm_latency_show, dev); 4549 } 4550 4551 static int spr_wm_latency_open(struct inode *inode, struct file *file) 4552 { 4553 struct drm_device *dev = inode->i_private; 4554 4555 if (HAS_GMCH_DISPLAY(dev)) 4556 return -ENODEV; 4557 4558 return single_open(file, spr_wm_latency_show, dev); 4559 } 4560 4561 static int cur_wm_latency_open(struct inode *inode, struct file *file) 4562 { 4563 struct drm_device *dev = inode->i_private; 4564 4565 if (HAS_GMCH_DISPLAY(dev)) 4566 return -ENODEV; 4567 4568 return single_open(file, cur_wm_latency_show, dev); 4569 } 4570 4571 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 4572 size_t len, loff_t *offp, uint16_t wm[8]) 4573 { 4574 struct seq_file *m = file->private_data; 4575 struct drm_device *dev = m->private; 4576 uint16_t new[8] = { 0 }; 4577 int num_levels; 4578 int level; 4579 int ret; 4580 char tmp[32]; 4581 4582 if (IS_CHERRYVIEW(dev)) 4583 num_levels = 3; 4584 else if (IS_VALLEYVIEW(dev)) 4585 num_levels = 1; 4586 else 4587 num_levels = ilk_wm_max_level(dev) + 1; 4588 4589 if (len >= sizeof(tmp)) 4590 return -EINVAL; 4591 4592 if (copy_from_user(tmp, ubuf, len)) 4593 return -EFAULT; 4594 4595 tmp[len] = '\0'; 4596 4597 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 4598 &new[0], &new[1], &new[2], &new[3], 4599 &new[4], &new[5], &new[6], &new[7]); 4600 if (ret != num_levels) 4601 return -EINVAL; 4602 4603 drm_modeset_lock_all(dev); 4604 4605 for (level = 0; level < num_levels; level++) 4606 wm[level] = new[level]; 4607 4608 drm_modeset_unlock_all(dev); 4609 4610 return len; 4611 } 4612 4613 4614 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 4615 size_t len, loff_t *offp) 4616 { 4617 struct seq_file *m = file->private_data; 4618 struct drm_device *dev = m->private; 4619 struct drm_i915_private *dev_priv = dev->dev_private; 4620 uint16_t *latencies; 4621 4622 if (INTEL_INFO(dev)->gen >= 9) 4623 latencies = dev_priv->wm.skl_latency; 4624 else 4625 latencies = to_i915(dev)->wm.pri_latency; 4626 4627 return wm_latency_write(file, ubuf, len, offp, latencies); 4628 } 4629 4630 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 4631 size_t len, loff_t *offp) 4632 { 4633 struct seq_file *m = file->private_data; 4634 struct drm_device *dev = m->private; 4635 struct drm_i915_private *dev_priv = dev->dev_private; 4636 uint16_t *latencies; 4637 4638 if (INTEL_INFO(dev)->gen >= 9) 4639 latencies = dev_priv->wm.skl_latency; 4640 else 4641 latencies = to_i915(dev)->wm.spr_latency; 4642 4643 return wm_latency_write(file, ubuf, len, offp, latencies); 4644 } 4645 4646 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 4647 size_t len, loff_t *offp) 4648 { 4649 struct seq_file *m = file->private_data; 4650 struct drm_device *dev = m->private; 4651 struct drm_i915_private *dev_priv = dev->dev_private; 4652 uint16_t *latencies; 4653 4654 if (INTEL_INFO(dev)->gen >= 9) 4655 latencies = dev_priv->wm.skl_latency; 4656 else 4657 latencies = to_i915(dev)->wm.cur_latency; 4658 4659 return wm_latency_write(file, ubuf, len, offp, latencies); 4660 } 4661 4662 static const struct file_operations i915_pri_wm_latency_fops = { 4663 .owner = THIS_MODULE, 4664 .open = pri_wm_latency_open, 4665 .read = seq_read, 4666 .llseek = seq_lseek, 4667 .release = single_release, 4668 .write = pri_wm_latency_write 4669 }; 4670 4671 static const struct file_operations i915_spr_wm_latency_fops = { 4672 .owner = THIS_MODULE, 4673 .open = spr_wm_latency_open, 4674 .read = seq_read, 4675 .llseek = seq_lseek, 4676 .release = single_release, 4677 .write = spr_wm_latency_write 4678 }; 4679 4680 static const struct file_operations i915_cur_wm_latency_fops = { 4681 .owner = THIS_MODULE, 4682 .open = cur_wm_latency_open, 4683 .read = seq_read, 4684 .llseek = seq_lseek, 4685 .release = single_release, 4686 .write = cur_wm_latency_write 4687 }; 4688 4689 static int 4690 i915_wedged_get(void *data, u64 *val) 4691 { 4692 struct drm_device *dev = data; 4693 struct drm_i915_private *dev_priv = dev->dev_private; 4694 4695 *val = atomic_read(&dev_priv->gpu_error.reset_counter); 4696 4697 return 0; 4698 } 4699 4700 static int 4701 i915_wedged_set(void *data, u64 val) 4702 { 4703 struct drm_device *dev = data; 4704 struct drm_i915_private *dev_priv = dev->dev_private; 4705 4706 /* 4707 * There is no safeguard against this debugfs entry colliding 4708 * with the hangcheck calling same i915_handle_error() in 4709 * parallel, causing an explosion. For now we assume that the 4710 * test harness is responsible enough not to inject gpu hangs 4711 * while it is writing to 'i915_wedged' 4712 */ 4713 4714 if (i915_reset_in_progress(&dev_priv->gpu_error)) 4715 return -EAGAIN; 4716 4717 intel_runtime_pm_get(dev_priv); 4718 4719 i915_handle_error(dev, val, 4720 "Manually setting wedged to %llu", val); 4721 4722 intel_runtime_pm_put(dev_priv); 4723 4724 return 0; 4725 } 4726 4727 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 4728 i915_wedged_get, i915_wedged_set, 4729 "%llu\n"); 4730 4731 static int 4732 i915_ring_stop_get(void *data, u64 *val) 4733 { 4734 struct drm_device *dev = data; 4735 struct drm_i915_private *dev_priv = dev->dev_private; 4736 4737 *val = dev_priv->gpu_error.stop_rings; 4738 4739 return 0; 4740 } 4741 4742 static int 4743 i915_ring_stop_set(void *data, u64 val) 4744 { 4745 struct drm_device *dev = data; 4746 struct drm_i915_private *dev_priv = dev->dev_private; 4747 int ret; 4748 4749 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 4750 4751 ret = mutex_lock_interruptible(&dev->struct_mutex); 4752 if (ret) 4753 return ret; 4754 4755 dev_priv->gpu_error.stop_rings = val; 4756 mutex_unlock(&dev->struct_mutex); 4757 4758 return 0; 4759 } 4760 4761 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 4762 i915_ring_stop_get, i915_ring_stop_set, 4763 "0x%08llx\n"); 4764 4765 static int 4766 i915_ring_missed_irq_get(void *data, u64 *val) 4767 { 4768 struct drm_device *dev = data; 4769 struct drm_i915_private *dev_priv = dev->dev_private; 4770 4771 *val = dev_priv->gpu_error.missed_irq_rings; 4772 return 0; 4773 } 4774 4775 static int 4776 i915_ring_missed_irq_set(void *data, u64 val) 4777 { 4778 struct drm_device *dev = data; 4779 struct drm_i915_private *dev_priv = dev->dev_private; 4780 int ret; 4781 4782 /* Lock against concurrent debugfs callers */ 4783 ret = mutex_lock_interruptible(&dev->struct_mutex); 4784 if (ret) 4785 return ret; 4786 dev_priv->gpu_error.missed_irq_rings = val; 4787 mutex_unlock(&dev->struct_mutex); 4788 4789 return 0; 4790 } 4791 4792 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 4793 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 4794 "0x%08llx\n"); 4795 4796 static int 4797 i915_ring_test_irq_get(void *data, u64 *val) 4798 { 4799 struct drm_device *dev = data; 4800 struct drm_i915_private *dev_priv = dev->dev_private; 4801 4802 *val = dev_priv->gpu_error.test_irq_rings; 4803 4804 return 0; 4805 } 4806 4807 static int 4808 i915_ring_test_irq_set(void *data, u64 val) 4809 { 4810 struct drm_device *dev = data; 4811 struct drm_i915_private *dev_priv = dev->dev_private; 4812 int ret; 4813 4814 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 4815 4816 /* Lock against concurrent debugfs callers */ 4817 ret = mutex_lock_interruptible(&dev->struct_mutex); 4818 if (ret) 4819 return ret; 4820 4821 dev_priv->gpu_error.test_irq_rings = val; 4822 mutex_unlock(&dev->struct_mutex); 4823 4824 return 0; 4825 } 4826 4827 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 4828 i915_ring_test_irq_get, i915_ring_test_irq_set, 4829 "0x%08llx\n"); 4830 4831 #define DROP_UNBOUND 0x1 4832 #define DROP_BOUND 0x2 4833 #define DROP_RETIRE 0x4 4834 #define DROP_ACTIVE 0x8 4835 #define DROP_ALL (DROP_UNBOUND | \ 4836 DROP_BOUND | \ 4837 DROP_RETIRE | \ 4838 DROP_ACTIVE) 4839 static int 4840 i915_drop_caches_get(void *data, u64 *val) 4841 { 4842 *val = DROP_ALL; 4843 4844 return 0; 4845 } 4846 4847 static int 4848 i915_drop_caches_set(void *data, u64 val) 4849 { 4850 struct drm_device *dev = data; 4851 struct drm_i915_private *dev_priv = dev->dev_private; 4852 int ret; 4853 4854 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 4855 4856 /* No need to check and wait for gpu resets, only libdrm auto-restarts 4857 * on ioctls on -EAGAIN. */ 4858 ret = mutex_lock_interruptible(&dev->struct_mutex); 4859 if (ret) 4860 return ret; 4861 4862 if (val & DROP_ACTIVE) { 4863 ret = i915_gpu_idle(dev); 4864 if (ret) 4865 goto unlock; 4866 } 4867 4868 if (val & (DROP_RETIRE | DROP_ACTIVE)) 4869 i915_gem_retire_requests(dev); 4870 4871 if (val & DROP_BOUND) 4872 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); 4873 4874 if (val & DROP_UNBOUND) 4875 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND); 4876 4877 unlock: 4878 mutex_unlock(&dev->struct_mutex); 4879 4880 return ret; 4881 } 4882 4883 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 4884 i915_drop_caches_get, i915_drop_caches_set, 4885 "0x%08llx\n"); 4886 4887 static int 4888 i915_max_freq_get(void *data, u64 *val) 4889 { 4890 struct drm_device *dev = data; 4891 struct drm_i915_private *dev_priv = dev->dev_private; 4892 int ret; 4893 4894 if (INTEL_INFO(dev)->gen < 6) 4895 return -ENODEV; 4896 4897 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4898 4899 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4900 if (ret) 4901 return ret; 4902 4903 *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); 4904 mutex_unlock(&dev_priv->rps.hw_lock); 4905 4906 return 0; 4907 } 4908 4909 static int 4910 i915_max_freq_set(void *data, u64 val) 4911 { 4912 struct drm_device *dev = data; 4913 struct drm_i915_private *dev_priv = dev->dev_private; 4914 u32 hw_max, hw_min; 4915 int ret; 4916 4917 if (INTEL_INFO(dev)->gen < 6) 4918 return -ENODEV; 4919 4920 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4921 4922 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 4923 4924 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4925 if (ret) 4926 return ret; 4927 4928 /* 4929 * Turbo will still be enabled, but won't go above the set value. 4930 */ 4931 val = intel_freq_opcode(dev_priv, val); 4932 4933 hw_max = dev_priv->rps.max_freq; 4934 hw_min = dev_priv->rps.min_freq; 4935 4936 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { 4937 mutex_unlock(&dev_priv->rps.hw_lock); 4938 return -EINVAL; 4939 } 4940 4941 dev_priv->rps.max_freq_softlimit = val; 4942 4943 intel_set_rps(dev, val); 4944 4945 mutex_unlock(&dev_priv->rps.hw_lock); 4946 4947 return 0; 4948 } 4949 4950 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 4951 i915_max_freq_get, i915_max_freq_set, 4952 "%llu\n"); 4953 4954 static int 4955 i915_min_freq_get(void *data, u64 *val) 4956 { 4957 struct drm_device *dev = data; 4958 struct drm_i915_private *dev_priv = dev->dev_private; 4959 int ret; 4960 4961 if (INTEL_INFO(dev)->gen < 6) 4962 return -ENODEV; 4963 4964 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4965 4966 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4967 if (ret) 4968 return ret; 4969 4970 *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); 4971 mutex_unlock(&dev_priv->rps.hw_lock); 4972 4973 return 0; 4974 } 4975 4976 static int 4977 i915_min_freq_set(void *data, u64 val) 4978 { 4979 struct drm_device *dev = data; 4980 struct drm_i915_private *dev_priv = dev->dev_private; 4981 u32 hw_max, hw_min; 4982 int ret; 4983 4984 if (INTEL_INFO(dev)->gen < 6) 4985 return -ENODEV; 4986 4987 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4988 4989 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 4990 4991 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4992 if (ret) 4993 return ret; 4994 4995 /* 4996 * Turbo will still be enabled, but won't go below the set value. 4997 */ 4998 val = intel_freq_opcode(dev_priv, val); 4999 5000 hw_max = dev_priv->rps.max_freq; 5001 hw_min = dev_priv->rps.min_freq; 5002 5003 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { 5004 mutex_unlock(&dev_priv->rps.hw_lock); 5005 return -EINVAL; 5006 } 5007 5008 dev_priv->rps.min_freq_softlimit = val; 5009 5010 intel_set_rps(dev, val); 5011 5012 mutex_unlock(&dev_priv->rps.hw_lock); 5013 5014 return 0; 5015 } 5016 5017 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 5018 i915_min_freq_get, i915_min_freq_set, 5019 "%llu\n"); 5020 5021 static int 5022 i915_cache_sharing_get(void *data, u64 *val) 5023 { 5024 struct drm_device *dev = data; 5025 struct drm_i915_private *dev_priv = dev->dev_private; 5026 u32 snpcr; 5027 int ret; 5028 5029 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 5030 return -ENODEV; 5031 5032 ret = mutex_lock_interruptible(&dev->struct_mutex); 5033 if (ret) 5034 return ret; 5035 intel_runtime_pm_get(dev_priv); 5036 5037 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 5038 5039 intel_runtime_pm_put(dev_priv); 5040 mutex_unlock(&dev_priv->dev->struct_mutex); 5041 5042 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 5043 5044 return 0; 5045 } 5046 5047 static int 5048 i915_cache_sharing_set(void *data, u64 val) 5049 { 5050 struct drm_device *dev = data; 5051 struct drm_i915_private *dev_priv = dev->dev_private; 5052 u32 snpcr; 5053 5054 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 5055 return -ENODEV; 5056 5057 if (val > 3) 5058 return -EINVAL; 5059 5060 intel_runtime_pm_get(dev_priv); 5061 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 5062 5063 /* Update the cache sharing policy here as well */ 5064 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 5065 snpcr &= ~GEN6_MBC_SNPCR_MASK; 5066 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 5067 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 5068 5069 intel_runtime_pm_put(dev_priv); 5070 return 0; 5071 } 5072 5073 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 5074 i915_cache_sharing_get, i915_cache_sharing_set, 5075 "%llu\n"); 5076 5077 struct sseu_dev_status { 5078 unsigned int slice_total; 5079 unsigned int subslice_total; 5080 unsigned int subslice_per_slice; 5081 unsigned int eu_total; 5082 unsigned int eu_per_subslice; 5083 }; 5084 5085 static void cherryview_sseu_device_status(struct drm_device *dev, 5086 struct sseu_dev_status *stat) 5087 { 5088 struct drm_i915_private *dev_priv = dev->dev_private; 5089 int ss_max = 2; 5090 int ss; 5091 u32 sig1[ss_max], sig2[ss_max]; 5092 5093 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); 5094 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); 5095 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); 5096 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); 5097 5098 for (ss = 0; ss < ss_max; ss++) { 5099 unsigned int eu_cnt; 5100 5101 if (sig1[ss] & CHV_SS_PG_ENABLE) 5102 /* skip disabled subslice */ 5103 continue; 5104 5105 stat->slice_total = 1; 5106 stat->subslice_per_slice++; 5107 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + 5108 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + 5109 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + 5110 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2); 5111 stat->eu_total += eu_cnt; 5112 stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt); 5113 } 5114 stat->subslice_total = stat->subslice_per_slice; 5115 } 5116 5117 static void gen9_sseu_device_status(struct drm_device *dev, 5118 struct sseu_dev_status *stat) 5119 { 5120 struct drm_i915_private *dev_priv = dev->dev_private; 5121 int s_max = 3, ss_max = 4; 5122 int s, ss; 5123 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; 5124 5125 /* BXT has a single slice and at most 3 subslices. */ 5126 if (IS_BROXTON(dev)) { 5127 s_max = 1; 5128 ss_max = 3; 5129 } 5130 5131 for (s = 0; s < s_max; s++) { 5132 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s)); 5133 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s)); 5134 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s)); 5135 } 5136 5137 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | 5138 GEN9_PGCTL_SSA_EU19_ACK | 5139 GEN9_PGCTL_SSA_EU210_ACK | 5140 GEN9_PGCTL_SSA_EU311_ACK; 5141 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | 5142 GEN9_PGCTL_SSB_EU19_ACK | 5143 GEN9_PGCTL_SSB_EU210_ACK | 5144 GEN9_PGCTL_SSB_EU311_ACK; 5145 5146 for (s = 0; s < s_max; s++) { 5147 unsigned int ss_cnt = 0; 5148 5149 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) 5150 /* skip disabled slice */ 5151 continue; 5152 5153 stat->slice_total++; 5154 5155 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 5156 ss_cnt = INTEL_INFO(dev)->subslice_per_slice; 5157 5158 for (ss = 0; ss < ss_max; ss++) { 5159 unsigned int eu_cnt; 5160 5161 if (IS_BROXTON(dev) && 5162 !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) 5163 /* skip disabled subslice */ 5164 continue; 5165 5166 if (IS_BROXTON(dev)) 5167 ss_cnt++; 5168 5169 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & 5170 eu_mask[ss%2]); 5171 stat->eu_total += eu_cnt; 5172 stat->eu_per_subslice = max(stat->eu_per_subslice, 5173 eu_cnt); 5174 } 5175 5176 stat->subslice_total += ss_cnt; 5177 stat->subslice_per_slice = max(stat->subslice_per_slice, 5178 ss_cnt); 5179 } 5180 } 5181 5182 static void broadwell_sseu_device_status(struct drm_device *dev, 5183 struct sseu_dev_status *stat) 5184 { 5185 struct drm_i915_private *dev_priv = dev->dev_private; 5186 int s; 5187 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); 5188 5189 stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK); 5190 5191 if (stat->slice_total) { 5192 stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice; 5193 stat->subslice_total = stat->slice_total * 5194 stat->subslice_per_slice; 5195 stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice; 5196 stat->eu_total = stat->eu_per_subslice * stat->subslice_total; 5197 5198 /* subtract fused off EU(s) from enabled slice(s) */ 5199 for (s = 0; s < stat->slice_total; s++) { 5200 u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s]; 5201 5202 stat->eu_total -= hweight8(subslice_7eu); 5203 } 5204 } 5205 } 5206 5207 static int i915_sseu_status(struct seq_file *m, void *unused) 5208 { 5209 struct drm_info_node *node = (struct drm_info_node *) m->private; 5210 struct drm_device *dev = node->minor->dev; 5211 struct sseu_dev_status stat; 5212 5213 if (INTEL_INFO(dev)->gen < 8) 5214 return -ENODEV; 5215 5216 seq_puts(m, "SSEU Device Info\n"); 5217 seq_printf(m, " Available Slice Total: %u\n", 5218 INTEL_INFO(dev)->slice_total); 5219 seq_printf(m, " Available Subslice Total: %u\n", 5220 INTEL_INFO(dev)->subslice_total); 5221 seq_printf(m, " Available Subslice Per Slice: %u\n", 5222 INTEL_INFO(dev)->subslice_per_slice); 5223 seq_printf(m, " Available EU Total: %u\n", 5224 INTEL_INFO(dev)->eu_total); 5225 seq_printf(m, " Available EU Per Subslice: %u\n", 5226 INTEL_INFO(dev)->eu_per_subslice); 5227 seq_printf(m, " Has Slice Power Gating: %s\n", 5228 yesno(INTEL_INFO(dev)->has_slice_pg)); 5229 seq_printf(m, " Has Subslice Power Gating: %s\n", 5230 yesno(INTEL_INFO(dev)->has_subslice_pg)); 5231 seq_printf(m, " Has EU Power Gating: %s\n", 5232 yesno(INTEL_INFO(dev)->has_eu_pg)); 5233 5234 seq_puts(m, "SSEU Device Status\n"); 5235 memset(&stat, 0, sizeof(stat)); 5236 if (IS_CHERRYVIEW(dev)) { 5237 cherryview_sseu_device_status(dev, &stat); 5238 } else if (IS_BROADWELL(dev)) { 5239 broadwell_sseu_device_status(dev, &stat); 5240 } else if (INTEL_INFO(dev)->gen >= 9) { 5241 gen9_sseu_device_status(dev, &stat); 5242 } 5243 seq_printf(m, " Enabled Slice Total: %u\n", 5244 stat.slice_total); 5245 seq_printf(m, " Enabled Subslice Total: %u\n", 5246 stat.subslice_total); 5247 seq_printf(m, " Enabled Subslice Per Slice: %u\n", 5248 stat.subslice_per_slice); 5249 seq_printf(m, " Enabled EU Total: %u\n", 5250 stat.eu_total); 5251 seq_printf(m, " Enabled EU Per Subslice: %u\n", 5252 stat.eu_per_subslice); 5253 5254 return 0; 5255 } 5256 5257 static int i915_forcewake_open(struct inode *inode, struct file *file) 5258 { 5259 struct drm_device *dev = inode->i_private; 5260 struct drm_i915_private *dev_priv = dev->dev_private; 5261 5262 if (INTEL_INFO(dev)->gen < 6) 5263 return 0; 5264 5265 intel_runtime_pm_get(dev_priv); 5266 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5267 5268 return 0; 5269 } 5270 5271 static int i915_forcewake_release(struct inode *inode, struct file *file) 5272 { 5273 struct drm_device *dev = inode->i_private; 5274 struct drm_i915_private *dev_priv = dev->dev_private; 5275 5276 if (INTEL_INFO(dev)->gen < 6) 5277 return 0; 5278 5279 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5280 intel_runtime_pm_put(dev_priv); 5281 5282 return 0; 5283 } 5284 5285 static const struct file_operations i915_forcewake_fops = { 5286 .owner = THIS_MODULE, 5287 .open = i915_forcewake_open, 5288 .release = i915_forcewake_release, 5289 }; 5290 5291 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 5292 { 5293 struct drm_device *dev = minor->dev; 5294 struct dentry *ent; 5295 5296 ent = debugfs_create_file("i915_forcewake_user", 5297 S_IRUSR, 5298 root, dev, 5299 &i915_forcewake_fops); 5300 if (!ent) 5301 return -ENOMEM; 5302 5303 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 5304 } 5305 5306 static int i915_debugfs_create(struct dentry *root, 5307 struct drm_minor *minor, 5308 const char *name, 5309 const struct file_operations *fops) 5310 { 5311 struct drm_device *dev = minor->dev; 5312 struct dentry *ent; 5313 5314 ent = debugfs_create_file(name, 5315 S_IRUGO | S_IWUSR, 5316 root, dev, 5317 fops); 5318 if (!ent) 5319 return -ENOMEM; 5320 5321 return drm_add_fake_info_node(minor, ent, fops); 5322 } 5323 5324 static const struct drm_info_list i915_debugfs_list[] = { 5325 {"i915_capabilities", i915_capabilities, 0}, 5326 {"i915_gem_objects", i915_gem_object_info, 0}, 5327 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 5328 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 5329 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 5330 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 5331 {"i915_gem_stolen", i915_gem_stolen_list_info }, 5332 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 5333 {"i915_gem_request", i915_gem_request_info, 0}, 5334 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 5335 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 5336 {"i915_gem_interrupt", i915_interrupt_info, 0}, 5337 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 5338 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 5339 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 5340 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, 5341 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, 5342 {"i915_guc_info", i915_guc_info, 0}, 5343 {"i915_guc_load_status", i915_guc_load_status_info, 0}, 5344 {"i915_guc_log_dump", i915_guc_log_dump, 0}, 5345 {"i915_frequency_info", i915_frequency_info, 0}, 5346 {"i915_hangcheck_info", i915_hangcheck_info, 0}, 5347 {"i915_drpc_info", i915_drpc_info, 0}, 5348 {"i915_emon_status", i915_emon_status, 0}, 5349 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 5350 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 5351 {"i915_fbc_status", i915_fbc_status, 0}, 5352 {"i915_ips_status", i915_ips_status, 0}, 5353 {"i915_sr_status", i915_sr_status, 0}, 5354 {"i915_opregion", i915_opregion, 0}, 5355 {"i915_vbt", i915_vbt, 0}, 5356 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 5357 {"i915_context_status", i915_context_status, 0}, 5358 {"i915_dump_lrc", i915_dump_lrc, 0}, 5359 {"i915_execlists", i915_execlists, 0}, 5360 {"i915_forcewake_domains", i915_forcewake_domains, 0}, 5361 {"i915_swizzle_info", i915_swizzle_info, 0}, 5362 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 5363 {"i915_llc", i915_llc, 0}, 5364 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 5365 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 5366 {"i915_energy_uJ", i915_energy_uJ, 0}, 5367 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 5368 {"i915_power_domain_info", i915_power_domain_info, 0}, 5369 {"i915_dmc_info", i915_dmc_info, 0}, 5370 {"i915_display_info", i915_display_info, 0}, 5371 {"i915_semaphore_status", i915_semaphore_status, 0}, 5372 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 5373 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 5374 {"i915_wa_registers", i915_wa_registers, 0}, 5375 {"i915_ddb_info", i915_ddb_info, 0}, 5376 {"i915_sseu_status", i915_sseu_status, 0}, 5377 {"i915_drrs_status", i915_drrs_status, 0}, 5378 {"i915_rps_boost_info", i915_rps_boost_info, 0}, 5379 }; 5380 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 5381 5382 static const struct i915_debugfs_files { 5383 const char *name; 5384 const struct file_operations *fops; 5385 } i915_debugfs_files[] = { 5386 {"i915_wedged", &i915_wedged_fops}, 5387 {"i915_max_freq", &i915_max_freq_fops}, 5388 {"i915_min_freq", &i915_min_freq_fops}, 5389 {"i915_cache_sharing", &i915_cache_sharing_fops}, 5390 {"i915_ring_stop", &i915_ring_stop_fops}, 5391 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 5392 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 5393 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 5394 {"i915_error_state", &i915_error_state_fops}, 5395 {"i915_next_seqno", &i915_next_seqno_fops}, 5396 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 5397 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 5398 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 5399 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 5400 {"i915_fbc_false_color", &i915_fbc_fc_fops}, 5401 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 5402 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 5403 {"i915_dp_test_active", &i915_displayport_test_active_fops} 5404 }; 5405 5406 void intel_display_crc_init(struct drm_device *dev) 5407 { 5408 struct drm_i915_private *dev_priv = dev->dev_private; 5409 enum pipe pipe; 5410 5411 for_each_pipe(dev_priv, pipe) { 5412 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 5413 5414 pipe_crc->opened = false; 5415 spin_lock_init(&pipe_crc->lock); 5416 init_waitqueue_head(&pipe_crc->wq); 5417 } 5418 } 5419 5420 int i915_debugfs_init(struct drm_minor *minor) 5421 { 5422 int ret, i; 5423 5424 ret = i915_forcewake_create(minor->debugfs_root, minor); 5425 if (ret) 5426 return ret; 5427 5428 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 5429 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); 5430 if (ret) 5431 return ret; 5432 } 5433 5434 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 5435 ret = i915_debugfs_create(minor->debugfs_root, minor, 5436 i915_debugfs_files[i].name, 5437 i915_debugfs_files[i].fops); 5438 if (ret) 5439 return ret; 5440 } 5441 5442 return drm_debugfs_create_files(i915_debugfs_list, 5443 I915_DEBUGFS_ENTRIES, 5444 minor->debugfs_root, minor); 5445 } 5446 5447 void i915_debugfs_cleanup(struct drm_minor *minor) 5448 { 5449 int i; 5450 5451 drm_debugfs_remove_files(i915_debugfs_list, 5452 I915_DEBUGFS_ENTRIES, minor); 5453 5454 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 5455 1, minor); 5456 5457 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 5458 struct drm_info_list *info_list = 5459 (struct drm_info_list *)&i915_pipe_crc_data[i]; 5460 5461 drm_debugfs_remove_files(info_list, 1, minor); 5462 } 5463 5464 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 5465 struct drm_info_list *info_list = 5466 (struct drm_info_list *) i915_debugfs_files[i].fops; 5467 5468 drm_debugfs_remove_files(info_list, 1, minor); 5469 } 5470 } 5471 5472 struct dpcd_block { 5473 /* DPCD dump start address. */ 5474 unsigned int offset; 5475 /* DPCD dump end address, inclusive. If unset, .size will be used. */ 5476 unsigned int end; 5477 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ 5478 size_t size; 5479 /* Only valid for eDP. */ 5480 bool edp; 5481 }; 5482 5483 static const struct dpcd_block i915_dpcd_debug[] = { 5484 { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, 5485 { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, 5486 { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, 5487 { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, 5488 { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, 5489 { .offset = DP_SET_POWER }, 5490 { .offset = DP_EDP_DPCD_REV }, 5491 { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, 5492 { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, 5493 { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, 5494 }; 5495 5496 static int i915_dpcd_show(struct seq_file *m, void *data) 5497 { 5498 struct drm_connector *connector = m->private; 5499 struct intel_dp *intel_dp = 5500 enc_to_intel_dp(&intel_attached_encoder(connector)->base); 5501 uint8_t buf[16]; 5502 ssize_t err; 5503 int i; 5504 5505 if (connector->status != connector_status_connected) 5506 return -ENODEV; 5507 5508 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) { 5509 const struct dpcd_block *b = &i915_dpcd_debug[i]; 5510 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1); 5511 5512 if (b->edp && 5513 connector->connector_type != DRM_MODE_CONNECTOR_eDP) 5514 continue; 5515 5516 /* low tech for now */ 5517 if (WARN_ON(size > sizeof(buf))) 5518 continue; 5519 5520 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size); 5521 if (err <= 0) { 5522 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", 5523 size, b->offset, err); 5524 continue; 5525 } 5526 5527 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); 5528 } 5529 5530 return 0; 5531 } 5532 5533 static int i915_dpcd_open(struct inode *inode, struct file *file) 5534 { 5535 return single_open(file, i915_dpcd_show, inode->i_private); 5536 } 5537 5538 static const struct file_operations i915_dpcd_fops = { 5539 .owner = THIS_MODULE, 5540 .open = i915_dpcd_open, 5541 .read = seq_read, 5542 .llseek = seq_lseek, 5543 .release = single_release, 5544 }; 5545 5546 /** 5547 * i915_debugfs_connector_add - add i915 specific connector debugfs files 5548 * @connector: pointer to a registered drm_connector 5549 * 5550 * Cleanup will be done by drm_connector_unregister() through a call to 5551 * drm_debugfs_connector_remove(). 5552 * 5553 * Returns 0 on success, negative error codes on error. 5554 */ 5555 int i915_debugfs_connector_add(struct drm_connector *connector) 5556 { 5557 struct dentry *root = connector->debugfs_entry; 5558 5559 /* The connector must have been registered beforehands. */ 5560 if (!root) 5561 return -ENODEV; 5562 5563 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || 5564 connector->connector_type == DRM_MODE_CONNECTOR_eDP) 5565 debugfs_create_file("i915_dpcd", S_IRUGO, root, connector, 5566 &i915_dpcd_fops); 5567 5568 return 0; 5569 } 5570