1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/circ_buf.h> 31 #include <linux/ctype.h> 32 #include <linux/debugfs.h> 33 #include <linux/slab.h> 34 #include <linux/export.h> 35 #include <linux/list_sort.h> 36 #include <asm/msr-index.h> 37 #include <drm/drmP.h> 38 #include "intel_drv.h" 39 #include "intel_ringbuffer.h" 40 #include <drm/i915_drm.h> 41 #include "i915_drv.h" 42 43 enum { 44 ACTIVE_LIST, 45 INACTIVE_LIST, 46 PINNED_LIST, 47 }; 48 49 static const char *yesno(int v) 50 { 51 return v ? "yes" : "no"; 52 } 53 54 /* As the drm_debugfs_init() routines are called before dev->dev_private is 55 * allocated we need to hook into the minor for release. */ 56 static int 57 drm_add_fake_info_node(struct drm_minor *minor, 58 struct dentry *ent, 59 const void *key) 60 { 61 struct drm_info_node *node; 62 63 node = kmalloc(sizeof(*node), GFP_KERNEL); 64 if (node == NULL) { 65 debugfs_remove(ent); 66 return -ENOMEM; 67 } 68 69 node->minor = minor; 70 node->dent = ent; 71 node->info_ent = (void *) key; 72 73 mutex_lock(&minor->debugfs_lock); 74 list_add(&node->list, &minor->debugfs_list); 75 mutex_unlock(&minor->debugfs_lock); 76 77 return 0; 78 } 79 80 static int i915_capabilities(struct seq_file *m, void *data) 81 { 82 struct drm_info_node *node = m->private; 83 struct drm_device *dev = node->minor->dev; 84 const struct intel_device_info *info = INTEL_INFO(dev); 85 86 seq_printf(m, "gen: %d\n", info->gen); 87 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 88 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 89 #define SEP_SEMICOLON ; 90 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON); 91 #undef PRINT_FLAG 92 #undef SEP_SEMICOLON 93 94 return 0; 95 } 96 97 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 98 { 99 if (obj->user_pin_count > 0) 100 return "P"; 101 else if (i915_gem_obj_is_pinned(obj)) 102 return "p"; 103 else 104 return " "; 105 } 106 107 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 108 { 109 switch (obj->tiling_mode) { 110 default: 111 case I915_TILING_NONE: return " "; 112 case I915_TILING_X: return "X"; 113 case I915_TILING_Y: return "Y"; 114 } 115 } 116 117 static inline const char *get_global_flag(struct drm_i915_gem_object *obj) 118 { 119 return i915_gem_obj_to_ggtt(obj) ? "g" : " "; 120 } 121 122 static void 123 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 124 { 125 struct i915_vma *vma; 126 int pin_count = 0; 127 128 seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s", 129 &obj->base, 130 get_pin_flag(obj), 131 get_tiling_flag(obj), 132 get_global_flag(obj), 133 obj->base.size / 1024, 134 obj->base.read_domains, 135 obj->base.write_domain, 136 obj->last_read_seqno, 137 obj->last_write_seqno, 138 obj->last_fenced_seqno, 139 i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level), 140 obj->dirty ? " dirty" : "", 141 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 142 if (obj->base.name) 143 seq_printf(m, " (name: %d)", obj->base.name); 144 list_for_each_entry(vma, &obj->vma_list, vma_link) 145 if (vma->pin_count > 0) 146 pin_count++; 147 seq_printf(m, " (pinned x %d)", pin_count); 148 if (obj->pin_display) 149 seq_printf(m, " (display)"); 150 if (obj->fence_reg != I915_FENCE_REG_NONE) 151 seq_printf(m, " (fence: %d)", obj->fence_reg); 152 list_for_each_entry(vma, &obj->vma_list, vma_link) { 153 if (!i915_is_ggtt(vma->vm)) 154 seq_puts(m, " (pp"); 155 else 156 seq_puts(m, " (g"); 157 seq_printf(m, "gtt offset: %08lx, size: %08lx)", 158 vma->node.start, vma->node.size); 159 } 160 if (obj->stolen) 161 seq_printf(m, " (stolen: %08lx)", obj->stolen->start); 162 if (obj->pin_mappable || obj->fault_mappable) { 163 char s[3], *t = s; 164 if (obj->pin_mappable) 165 *t++ = 'p'; 166 if (obj->fault_mappable) 167 *t++ = 'f'; 168 *t = '\0'; 169 seq_printf(m, " (%s mappable)", s); 170 } 171 if (obj->ring != NULL) 172 seq_printf(m, " (%s)", obj->ring->name); 173 if (obj->frontbuffer_bits) 174 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); 175 } 176 177 static void describe_ctx(struct seq_file *m, struct intel_context *ctx) 178 { 179 seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i'); 180 seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 181 seq_putc(m, ' '); 182 } 183 184 static int i915_gem_object_list_info(struct seq_file *m, void *data) 185 { 186 struct drm_info_node *node = m->private; 187 uintptr_t list = (uintptr_t) node->info_ent->data; 188 struct list_head *head; 189 struct drm_device *dev = node->minor->dev; 190 struct drm_i915_private *dev_priv = dev->dev_private; 191 struct i915_address_space *vm = &dev_priv->gtt.base; 192 struct i915_vma *vma; 193 size_t total_obj_size, total_gtt_size; 194 int count, ret; 195 196 ret = mutex_lock_interruptible(&dev->struct_mutex); 197 if (ret) 198 return ret; 199 200 /* FIXME: the user of this interface might want more than just GGTT */ 201 switch (list) { 202 case ACTIVE_LIST: 203 seq_puts(m, "Active:\n"); 204 head = &vm->active_list; 205 break; 206 case INACTIVE_LIST: 207 seq_puts(m, "Inactive:\n"); 208 head = &vm->inactive_list; 209 break; 210 default: 211 mutex_unlock(&dev->struct_mutex); 212 return -EINVAL; 213 } 214 215 total_obj_size = total_gtt_size = count = 0; 216 list_for_each_entry(vma, head, mm_list) { 217 seq_printf(m, " "); 218 describe_obj(m, vma->obj); 219 seq_printf(m, "\n"); 220 total_obj_size += vma->obj->base.size; 221 total_gtt_size += vma->node.size; 222 count++; 223 } 224 mutex_unlock(&dev->struct_mutex); 225 226 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 227 count, total_obj_size, total_gtt_size); 228 return 0; 229 } 230 231 static int obj_rank_by_stolen(void *priv, 232 struct list_head *A, struct list_head *B) 233 { 234 struct drm_i915_gem_object *a = 235 container_of(A, struct drm_i915_gem_object, obj_exec_link); 236 struct drm_i915_gem_object *b = 237 container_of(B, struct drm_i915_gem_object, obj_exec_link); 238 239 return a->stolen->start - b->stolen->start; 240 } 241 242 static int i915_gem_stolen_list_info(struct seq_file *m, void *data) 243 { 244 struct drm_info_node *node = m->private; 245 struct drm_device *dev = node->minor->dev; 246 struct drm_i915_private *dev_priv = dev->dev_private; 247 struct drm_i915_gem_object *obj; 248 size_t total_obj_size, total_gtt_size; 249 LIST_HEAD(stolen); 250 int count, ret; 251 252 ret = mutex_lock_interruptible(&dev->struct_mutex); 253 if (ret) 254 return ret; 255 256 total_obj_size = total_gtt_size = count = 0; 257 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 258 if (obj->stolen == NULL) 259 continue; 260 261 list_add(&obj->obj_exec_link, &stolen); 262 263 total_obj_size += obj->base.size; 264 total_gtt_size += i915_gem_obj_ggtt_size(obj); 265 count++; 266 } 267 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 268 if (obj->stolen == NULL) 269 continue; 270 271 list_add(&obj->obj_exec_link, &stolen); 272 273 total_obj_size += obj->base.size; 274 count++; 275 } 276 list_sort(NULL, &stolen, obj_rank_by_stolen); 277 seq_puts(m, "Stolen:\n"); 278 while (!list_empty(&stolen)) { 279 obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link); 280 seq_puts(m, " "); 281 describe_obj(m, obj); 282 seq_putc(m, '\n'); 283 list_del_init(&obj->obj_exec_link); 284 } 285 mutex_unlock(&dev->struct_mutex); 286 287 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 288 count, total_obj_size, total_gtt_size); 289 return 0; 290 } 291 292 #define count_objects(list, member) do { \ 293 list_for_each_entry(obj, list, member) { \ 294 size += i915_gem_obj_ggtt_size(obj); \ 295 ++count; \ 296 if (obj->map_and_fenceable) { \ 297 mappable_size += i915_gem_obj_ggtt_size(obj); \ 298 ++mappable_count; \ 299 } \ 300 } \ 301 } while (0) 302 303 struct file_stats { 304 struct drm_i915_file_private *file_priv; 305 int count; 306 size_t total, unbound; 307 size_t global, shared; 308 size_t active, inactive; 309 }; 310 311 static int per_file_stats(int id, void *ptr, void *data) 312 { 313 struct drm_i915_gem_object *obj = ptr; 314 struct file_stats *stats = data; 315 struct i915_vma *vma; 316 317 stats->count++; 318 stats->total += obj->base.size; 319 320 if (obj->base.name || obj->base.dma_buf) 321 stats->shared += obj->base.size; 322 323 if (USES_FULL_PPGTT(obj->base.dev)) { 324 list_for_each_entry(vma, &obj->vma_list, vma_link) { 325 struct i915_hw_ppgtt *ppgtt; 326 327 if (!drm_mm_node_allocated(&vma->node)) 328 continue; 329 330 if (i915_is_ggtt(vma->vm)) { 331 stats->global += obj->base.size; 332 continue; 333 } 334 335 ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base); 336 if (ppgtt->file_priv != stats->file_priv) 337 continue; 338 339 if (obj->ring) /* XXX per-vma statistic */ 340 stats->active += obj->base.size; 341 else 342 stats->inactive += obj->base.size; 343 344 return 0; 345 } 346 } else { 347 if (i915_gem_obj_ggtt_bound(obj)) { 348 stats->global += obj->base.size; 349 if (obj->ring) 350 stats->active += obj->base.size; 351 else 352 stats->inactive += obj->base.size; 353 return 0; 354 } 355 } 356 357 if (!list_empty(&obj->global_list)) 358 stats->unbound += obj->base.size; 359 360 return 0; 361 } 362 363 #define count_vmas(list, member) do { \ 364 list_for_each_entry(vma, list, member) { \ 365 size += i915_gem_obj_ggtt_size(vma->obj); \ 366 ++count; \ 367 if (vma->obj->map_and_fenceable) { \ 368 mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ 369 ++mappable_count; \ 370 } \ 371 } \ 372 } while (0) 373 374 static int i915_gem_object_info(struct seq_file *m, void* data) 375 { 376 struct drm_info_node *node = m->private; 377 struct drm_device *dev = node->minor->dev; 378 struct drm_i915_private *dev_priv = dev->dev_private; 379 u32 count, mappable_count, purgeable_count; 380 size_t size, mappable_size, purgeable_size; 381 struct drm_i915_gem_object *obj; 382 struct i915_address_space *vm = &dev_priv->gtt.base; 383 struct drm_file *file; 384 struct i915_vma *vma; 385 int ret; 386 387 ret = mutex_lock_interruptible(&dev->struct_mutex); 388 if (ret) 389 return ret; 390 391 seq_printf(m, "%u objects, %zu bytes\n", 392 dev_priv->mm.object_count, 393 dev_priv->mm.object_memory); 394 395 size = count = mappable_size = mappable_count = 0; 396 count_objects(&dev_priv->mm.bound_list, global_list); 397 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 398 count, mappable_count, size, mappable_size); 399 400 size = count = mappable_size = mappable_count = 0; 401 count_vmas(&vm->active_list, mm_list); 402 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 403 count, mappable_count, size, mappable_size); 404 405 size = count = mappable_size = mappable_count = 0; 406 count_vmas(&vm->inactive_list, mm_list); 407 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 408 count, mappable_count, size, mappable_size); 409 410 size = count = purgeable_size = purgeable_count = 0; 411 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 412 size += obj->base.size, ++count; 413 if (obj->madv == I915_MADV_DONTNEED) 414 purgeable_size += obj->base.size, ++purgeable_count; 415 } 416 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); 417 418 size = count = mappable_size = mappable_count = 0; 419 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 420 if (obj->fault_mappable) { 421 size += i915_gem_obj_ggtt_size(obj); 422 ++count; 423 } 424 if (obj->pin_mappable) { 425 mappable_size += i915_gem_obj_ggtt_size(obj); 426 ++mappable_count; 427 } 428 if (obj->madv == I915_MADV_DONTNEED) { 429 purgeable_size += obj->base.size; 430 ++purgeable_count; 431 } 432 } 433 seq_printf(m, "%u purgeable objects, %zu bytes\n", 434 purgeable_count, purgeable_size); 435 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 436 mappable_count, mappable_size); 437 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 438 count, size); 439 440 seq_printf(m, "%zu [%lu] gtt total\n", 441 dev_priv->gtt.base.total, 442 dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); 443 444 seq_putc(m, '\n'); 445 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 446 struct file_stats stats; 447 struct task_struct *task; 448 449 memset(&stats, 0, sizeof(stats)); 450 stats.file_priv = file->driver_priv; 451 spin_lock(&file->table_lock); 452 idr_for_each(&file->object_idr, per_file_stats, &stats); 453 spin_unlock(&file->table_lock); 454 /* 455 * Although we have a valid reference on file->pid, that does 456 * not guarantee that the task_struct who called get_pid() is 457 * still alive (e.g. get_pid(current) => fork() => exit()). 458 * Therefore, we need to protect this ->comm access using RCU. 459 */ 460 rcu_read_lock(); 461 task = pid_task(file->pid, PIDTYPE_PID); 462 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", 463 task ? task->comm : "<unknown>", 464 stats.count, 465 stats.total, 466 stats.active, 467 stats.inactive, 468 stats.global, 469 stats.shared, 470 stats.unbound); 471 rcu_read_unlock(); 472 } 473 474 mutex_unlock(&dev->struct_mutex); 475 476 return 0; 477 } 478 479 static int i915_gem_gtt_info(struct seq_file *m, void *data) 480 { 481 struct drm_info_node *node = m->private; 482 struct drm_device *dev = node->minor->dev; 483 uintptr_t list = (uintptr_t) node->info_ent->data; 484 struct drm_i915_private *dev_priv = dev->dev_private; 485 struct drm_i915_gem_object *obj; 486 size_t total_obj_size, total_gtt_size; 487 int count, ret; 488 489 ret = mutex_lock_interruptible(&dev->struct_mutex); 490 if (ret) 491 return ret; 492 493 total_obj_size = total_gtt_size = count = 0; 494 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 495 if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj)) 496 continue; 497 498 seq_puts(m, " "); 499 describe_obj(m, obj); 500 seq_putc(m, '\n'); 501 total_obj_size += obj->base.size; 502 total_gtt_size += i915_gem_obj_ggtt_size(obj); 503 count++; 504 } 505 506 mutex_unlock(&dev->struct_mutex); 507 508 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 509 count, total_obj_size, total_gtt_size); 510 511 return 0; 512 } 513 514 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 515 { 516 struct drm_info_node *node = m->private; 517 struct drm_device *dev = node->minor->dev; 518 struct drm_i915_private *dev_priv = dev->dev_private; 519 struct intel_crtc *crtc; 520 int ret; 521 522 ret = mutex_lock_interruptible(&dev->struct_mutex); 523 if (ret) 524 return ret; 525 526 for_each_intel_crtc(dev, crtc) { 527 const char pipe = pipe_name(crtc->pipe); 528 const char plane = plane_name(crtc->plane); 529 struct intel_unpin_work *work; 530 531 spin_lock_irq(&dev->event_lock); 532 work = crtc->unpin_work; 533 if (work == NULL) { 534 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 535 pipe, plane); 536 } else { 537 u32 addr; 538 539 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 540 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 541 pipe, plane); 542 } else { 543 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 544 pipe, plane); 545 } 546 if (work->flip_queued_ring) { 547 seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n", 548 work->flip_queued_ring->name, 549 work->flip_queued_seqno, 550 dev_priv->next_seqno, 551 work->flip_queued_ring->get_seqno(work->flip_queued_ring, true), 552 i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true), 553 work->flip_queued_seqno)); 554 } else 555 seq_printf(m, "Flip not associated with any ring\n"); 556 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", 557 work->flip_queued_vblank, 558 work->flip_ready_vblank, 559 drm_vblank_count(dev, crtc->pipe)); 560 if (work->enable_stall_check) 561 seq_puts(m, "Stall check enabled, "); 562 else 563 seq_puts(m, "Stall check waiting for page flip ioctl, "); 564 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 565 566 if (INTEL_INFO(dev)->gen >= 4) 567 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane))); 568 else 569 addr = I915_READ(DSPADDR(crtc->plane)); 570 seq_printf(m, "Current scanout address 0x%08x\n", addr); 571 572 if (work->pending_flip_obj) { 573 seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset); 574 seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset); 575 } 576 } 577 spin_unlock_irq(&dev->event_lock); 578 } 579 580 mutex_unlock(&dev->struct_mutex); 581 582 return 0; 583 } 584 585 static int i915_gem_request_info(struct seq_file *m, void *data) 586 { 587 struct drm_info_node *node = m->private; 588 struct drm_device *dev = node->minor->dev; 589 struct drm_i915_private *dev_priv = dev->dev_private; 590 struct intel_engine_cs *ring; 591 struct drm_i915_gem_request *gem_request; 592 int ret, count, i; 593 594 ret = mutex_lock_interruptible(&dev->struct_mutex); 595 if (ret) 596 return ret; 597 598 count = 0; 599 for_each_ring(ring, dev_priv, i) { 600 if (list_empty(&ring->request_list)) 601 continue; 602 603 seq_printf(m, "%s requests:\n", ring->name); 604 list_for_each_entry(gem_request, 605 &ring->request_list, 606 list) { 607 seq_printf(m, " %d @ %d\n", 608 gem_request->seqno, 609 (int) (jiffies - gem_request->emitted_jiffies)); 610 } 611 count++; 612 } 613 mutex_unlock(&dev->struct_mutex); 614 615 if (count == 0) 616 seq_puts(m, "No requests\n"); 617 618 return 0; 619 } 620 621 static void i915_ring_seqno_info(struct seq_file *m, 622 struct intel_engine_cs *ring) 623 { 624 if (ring->get_seqno) { 625 seq_printf(m, "Current sequence (%s): %u\n", 626 ring->name, ring->get_seqno(ring, false)); 627 } 628 } 629 630 static int i915_gem_seqno_info(struct seq_file *m, void *data) 631 { 632 struct drm_info_node *node = m->private; 633 struct drm_device *dev = node->minor->dev; 634 struct drm_i915_private *dev_priv = dev->dev_private; 635 struct intel_engine_cs *ring; 636 int ret, i; 637 638 ret = mutex_lock_interruptible(&dev->struct_mutex); 639 if (ret) 640 return ret; 641 intel_runtime_pm_get(dev_priv); 642 643 for_each_ring(ring, dev_priv, i) 644 i915_ring_seqno_info(m, ring); 645 646 intel_runtime_pm_put(dev_priv); 647 mutex_unlock(&dev->struct_mutex); 648 649 return 0; 650 } 651 652 653 static int i915_interrupt_info(struct seq_file *m, void *data) 654 { 655 struct drm_info_node *node = m->private; 656 struct drm_device *dev = node->minor->dev; 657 struct drm_i915_private *dev_priv = dev->dev_private; 658 struct intel_engine_cs *ring; 659 int ret, i, pipe; 660 661 ret = mutex_lock_interruptible(&dev->struct_mutex); 662 if (ret) 663 return ret; 664 intel_runtime_pm_get(dev_priv); 665 666 if (IS_CHERRYVIEW(dev)) { 667 seq_printf(m, "Master Interrupt Control:\t%08x\n", 668 I915_READ(GEN8_MASTER_IRQ)); 669 670 seq_printf(m, "Display IER:\t%08x\n", 671 I915_READ(VLV_IER)); 672 seq_printf(m, "Display IIR:\t%08x\n", 673 I915_READ(VLV_IIR)); 674 seq_printf(m, "Display IIR_RW:\t%08x\n", 675 I915_READ(VLV_IIR_RW)); 676 seq_printf(m, "Display IMR:\t%08x\n", 677 I915_READ(VLV_IMR)); 678 for_each_pipe(dev_priv, pipe) 679 seq_printf(m, "Pipe %c stat:\t%08x\n", 680 pipe_name(pipe), 681 I915_READ(PIPESTAT(pipe))); 682 683 seq_printf(m, "Port hotplug:\t%08x\n", 684 I915_READ(PORT_HOTPLUG_EN)); 685 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 686 I915_READ(VLV_DPFLIPSTAT)); 687 seq_printf(m, "DPINVGTT:\t%08x\n", 688 I915_READ(DPINVGTT)); 689 690 for (i = 0; i < 4; i++) { 691 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 692 i, I915_READ(GEN8_GT_IMR(i))); 693 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 694 i, I915_READ(GEN8_GT_IIR(i))); 695 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 696 i, I915_READ(GEN8_GT_IER(i))); 697 } 698 699 seq_printf(m, "PCU interrupt mask:\t%08x\n", 700 I915_READ(GEN8_PCU_IMR)); 701 seq_printf(m, "PCU interrupt identity:\t%08x\n", 702 I915_READ(GEN8_PCU_IIR)); 703 seq_printf(m, "PCU interrupt enable:\t%08x\n", 704 I915_READ(GEN8_PCU_IER)); 705 } else if (INTEL_INFO(dev)->gen >= 8) { 706 seq_printf(m, "Master Interrupt Control:\t%08x\n", 707 I915_READ(GEN8_MASTER_IRQ)); 708 709 for (i = 0; i < 4; i++) { 710 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 711 i, I915_READ(GEN8_GT_IMR(i))); 712 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 713 i, I915_READ(GEN8_GT_IIR(i))); 714 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 715 i, I915_READ(GEN8_GT_IER(i))); 716 } 717 718 for_each_pipe(dev_priv, pipe) { 719 if (!intel_display_power_is_enabled(dev_priv, 720 POWER_DOMAIN_PIPE(pipe))) { 721 seq_printf(m, "Pipe %c power disabled\n", 722 pipe_name(pipe)); 723 continue; 724 } 725 seq_printf(m, "Pipe %c IMR:\t%08x\n", 726 pipe_name(pipe), 727 I915_READ(GEN8_DE_PIPE_IMR(pipe))); 728 seq_printf(m, "Pipe %c IIR:\t%08x\n", 729 pipe_name(pipe), 730 I915_READ(GEN8_DE_PIPE_IIR(pipe))); 731 seq_printf(m, "Pipe %c IER:\t%08x\n", 732 pipe_name(pipe), 733 I915_READ(GEN8_DE_PIPE_IER(pipe))); 734 } 735 736 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 737 I915_READ(GEN8_DE_PORT_IMR)); 738 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 739 I915_READ(GEN8_DE_PORT_IIR)); 740 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 741 I915_READ(GEN8_DE_PORT_IER)); 742 743 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 744 I915_READ(GEN8_DE_MISC_IMR)); 745 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 746 I915_READ(GEN8_DE_MISC_IIR)); 747 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 748 I915_READ(GEN8_DE_MISC_IER)); 749 750 seq_printf(m, "PCU interrupt mask:\t%08x\n", 751 I915_READ(GEN8_PCU_IMR)); 752 seq_printf(m, "PCU interrupt identity:\t%08x\n", 753 I915_READ(GEN8_PCU_IIR)); 754 seq_printf(m, "PCU interrupt enable:\t%08x\n", 755 I915_READ(GEN8_PCU_IER)); 756 } else if (IS_VALLEYVIEW(dev)) { 757 seq_printf(m, "Display IER:\t%08x\n", 758 I915_READ(VLV_IER)); 759 seq_printf(m, "Display IIR:\t%08x\n", 760 I915_READ(VLV_IIR)); 761 seq_printf(m, "Display IIR_RW:\t%08x\n", 762 I915_READ(VLV_IIR_RW)); 763 seq_printf(m, "Display IMR:\t%08x\n", 764 I915_READ(VLV_IMR)); 765 for_each_pipe(dev_priv, pipe) 766 seq_printf(m, "Pipe %c stat:\t%08x\n", 767 pipe_name(pipe), 768 I915_READ(PIPESTAT(pipe))); 769 770 seq_printf(m, "Master IER:\t%08x\n", 771 I915_READ(VLV_MASTER_IER)); 772 773 seq_printf(m, "Render IER:\t%08x\n", 774 I915_READ(GTIER)); 775 seq_printf(m, "Render IIR:\t%08x\n", 776 I915_READ(GTIIR)); 777 seq_printf(m, "Render IMR:\t%08x\n", 778 I915_READ(GTIMR)); 779 780 seq_printf(m, "PM IER:\t\t%08x\n", 781 I915_READ(GEN6_PMIER)); 782 seq_printf(m, "PM IIR:\t\t%08x\n", 783 I915_READ(GEN6_PMIIR)); 784 seq_printf(m, "PM IMR:\t\t%08x\n", 785 I915_READ(GEN6_PMIMR)); 786 787 seq_printf(m, "Port hotplug:\t%08x\n", 788 I915_READ(PORT_HOTPLUG_EN)); 789 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 790 I915_READ(VLV_DPFLIPSTAT)); 791 seq_printf(m, "DPINVGTT:\t%08x\n", 792 I915_READ(DPINVGTT)); 793 794 } else if (!HAS_PCH_SPLIT(dev)) { 795 seq_printf(m, "Interrupt enable: %08x\n", 796 I915_READ(IER)); 797 seq_printf(m, "Interrupt identity: %08x\n", 798 I915_READ(IIR)); 799 seq_printf(m, "Interrupt mask: %08x\n", 800 I915_READ(IMR)); 801 for_each_pipe(dev_priv, pipe) 802 seq_printf(m, "Pipe %c stat: %08x\n", 803 pipe_name(pipe), 804 I915_READ(PIPESTAT(pipe))); 805 } else { 806 seq_printf(m, "North Display Interrupt enable: %08x\n", 807 I915_READ(DEIER)); 808 seq_printf(m, "North Display Interrupt identity: %08x\n", 809 I915_READ(DEIIR)); 810 seq_printf(m, "North Display Interrupt mask: %08x\n", 811 I915_READ(DEIMR)); 812 seq_printf(m, "South Display Interrupt enable: %08x\n", 813 I915_READ(SDEIER)); 814 seq_printf(m, "South Display Interrupt identity: %08x\n", 815 I915_READ(SDEIIR)); 816 seq_printf(m, "South Display Interrupt mask: %08x\n", 817 I915_READ(SDEIMR)); 818 seq_printf(m, "Graphics Interrupt enable: %08x\n", 819 I915_READ(GTIER)); 820 seq_printf(m, "Graphics Interrupt identity: %08x\n", 821 I915_READ(GTIIR)); 822 seq_printf(m, "Graphics Interrupt mask: %08x\n", 823 I915_READ(GTIMR)); 824 } 825 for_each_ring(ring, dev_priv, i) { 826 if (INTEL_INFO(dev)->gen >= 6) { 827 seq_printf(m, 828 "Graphics Interrupt mask (%s): %08x\n", 829 ring->name, I915_READ_IMR(ring)); 830 } 831 i915_ring_seqno_info(m, ring); 832 } 833 intel_runtime_pm_put(dev_priv); 834 mutex_unlock(&dev->struct_mutex); 835 836 return 0; 837 } 838 839 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 840 { 841 struct drm_info_node *node = m->private; 842 struct drm_device *dev = node->minor->dev; 843 struct drm_i915_private *dev_priv = dev->dev_private; 844 int i, ret; 845 846 ret = mutex_lock_interruptible(&dev->struct_mutex); 847 if (ret) 848 return ret; 849 850 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 851 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 852 for (i = 0; i < dev_priv->num_fence_regs; i++) { 853 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 854 855 seq_printf(m, "Fence %d, pin count = %d, object = ", 856 i, dev_priv->fence_regs[i].pin_count); 857 if (obj == NULL) 858 seq_puts(m, "unused"); 859 else 860 describe_obj(m, obj); 861 seq_putc(m, '\n'); 862 } 863 864 mutex_unlock(&dev->struct_mutex); 865 return 0; 866 } 867 868 static int i915_hws_info(struct seq_file *m, void *data) 869 { 870 struct drm_info_node *node = m->private; 871 struct drm_device *dev = node->minor->dev; 872 struct drm_i915_private *dev_priv = dev->dev_private; 873 struct intel_engine_cs *ring; 874 const u32 *hws; 875 int i; 876 877 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 878 hws = ring->status_page.page_addr; 879 if (hws == NULL) 880 return 0; 881 882 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 883 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 884 i * 4, 885 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 886 } 887 return 0; 888 } 889 890 static ssize_t 891 i915_error_state_write(struct file *filp, 892 const char __user *ubuf, 893 size_t cnt, 894 loff_t *ppos) 895 { 896 struct i915_error_state_file_priv *error_priv = filp->private_data; 897 struct drm_device *dev = error_priv->dev; 898 int ret; 899 900 DRM_DEBUG_DRIVER("Resetting error state\n"); 901 902 ret = mutex_lock_interruptible(&dev->struct_mutex); 903 if (ret) 904 return ret; 905 906 i915_destroy_error_state(dev); 907 mutex_unlock(&dev->struct_mutex); 908 909 return cnt; 910 } 911 912 static int i915_error_state_open(struct inode *inode, struct file *file) 913 { 914 struct drm_device *dev = inode->i_private; 915 struct i915_error_state_file_priv *error_priv; 916 917 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); 918 if (!error_priv) 919 return -ENOMEM; 920 921 error_priv->dev = dev; 922 923 i915_error_state_get(dev, error_priv); 924 925 file->private_data = error_priv; 926 927 return 0; 928 } 929 930 static int i915_error_state_release(struct inode *inode, struct file *file) 931 { 932 struct i915_error_state_file_priv *error_priv = file->private_data; 933 934 i915_error_state_put(error_priv); 935 kfree(error_priv); 936 937 return 0; 938 } 939 940 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, 941 size_t count, loff_t *pos) 942 { 943 struct i915_error_state_file_priv *error_priv = file->private_data; 944 struct drm_i915_error_state_buf error_str; 945 loff_t tmp_pos = 0; 946 ssize_t ret_count = 0; 947 int ret; 948 949 ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos); 950 if (ret) 951 return ret; 952 953 ret = i915_error_state_to_str(&error_str, error_priv); 954 if (ret) 955 goto out; 956 957 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos, 958 error_str.buf, 959 error_str.bytes); 960 961 if (ret_count < 0) 962 ret = ret_count; 963 else 964 *pos = error_str.start + ret_count; 965 out: 966 i915_error_state_buf_release(&error_str); 967 return ret ?: ret_count; 968 } 969 970 static const struct file_operations i915_error_state_fops = { 971 .owner = THIS_MODULE, 972 .open = i915_error_state_open, 973 .read = i915_error_state_read, 974 .write = i915_error_state_write, 975 .llseek = default_llseek, 976 .release = i915_error_state_release, 977 }; 978 979 static int 980 i915_next_seqno_get(void *data, u64 *val) 981 { 982 struct drm_device *dev = data; 983 struct drm_i915_private *dev_priv = dev->dev_private; 984 int ret; 985 986 ret = mutex_lock_interruptible(&dev->struct_mutex); 987 if (ret) 988 return ret; 989 990 *val = dev_priv->next_seqno; 991 mutex_unlock(&dev->struct_mutex); 992 993 return 0; 994 } 995 996 static int 997 i915_next_seqno_set(void *data, u64 val) 998 { 999 struct drm_device *dev = data; 1000 int ret; 1001 1002 ret = mutex_lock_interruptible(&dev->struct_mutex); 1003 if (ret) 1004 return ret; 1005 1006 ret = i915_gem_set_seqno(dev, val); 1007 mutex_unlock(&dev->struct_mutex); 1008 1009 return ret; 1010 } 1011 1012 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, 1013 i915_next_seqno_get, i915_next_seqno_set, 1014 "0x%llx\n"); 1015 1016 static int i915_frequency_info(struct seq_file *m, void *unused) 1017 { 1018 struct drm_info_node *node = m->private; 1019 struct drm_device *dev = node->minor->dev; 1020 struct drm_i915_private *dev_priv = dev->dev_private; 1021 int ret = 0; 1022 1023 intel_runtime_pm_get(dev_priv); 1024 1025 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1026 1027 if (IS_GEN5(dev)) { 1028 u16 rgvswctl = I915_READ16(MEMSWCTL); 1029 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 1030 1031 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 1032 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 1033 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 1034 MEMSTAT_VID_SHIFT); 1035 seq_printf(m, "Current P-state: %d\n", 1036 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 1037 } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) || 1038 IS_BROADWELL(dev)) { 1039 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 1040 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 1041 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 1042 u32 rpmodectl, rpinclimit, rpdeclimit; 1043 u32 rpstat, cagf, reqf; 1044 u32 rpupei, rpcurup, rpprevup; 1045 u32 rpdownei, rpcurdown, rpprevdown; 1046 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 1047 int max_freq; 1048 1049 /* RPSTAT1 is in the GT power well */ 1050 ret = mutex_lock_interruptible(&dev->struct_mutex); 1051 if (ret) 1052 goto out; 1053 1054 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 1055 1056 reqf = I915_READ(GEN6_RPNSWREQ); 1057 reqf &= ~GEN6_TURBO_DISABLE; 1058 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1059 reqf >>= 24; 1060 else 1061 reqf >>= 25; 1062 reqf *= GT_FREQUENCY_MULTIPLIER; 1063 1064 rpmodectl = I915_READ(GEN6_RP_CONTROL); 1065 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); 1066 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); 1067 1068 rpstat = I915_READ(GEN6_RPSTAT1); 1069 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 1070 rpcurup = I915_READ(GEN6_RP_CUR_UP); 1071 rpprevup = I915_READ(GEN6_RP_PREV_UP); 1072 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 1073 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 1074 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 1075 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1076 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 1077 else 1078 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 1079 cagf *= GT_FREQUENCY_MULTIPLIER; 1080 1081 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 1082 mutex_unlock(&dev->struct_mutex); 1083 1084 if (IS_GEN6(dev) || IS_GEN7(dev)) { 1085 pm_ier = I915_READ(GEN6_PMIER); 1086 pm_imr = I915_READ(GEN6_PMIMR); 1087 pm_isr = I915_READ(GEN6_PMISR); 1088 pm_iir = I915_READ(GEN6_PMIIR); 1089 pm_mask = I915_READ(GEN6_PMINTRMSK); 1090 } else { 1091 pm_ier = I915_READ(GEN8_GT_IER(2)); 1092 pm_imr = I915_READ(GEN8_GT_IMR(2)); 1093 pm_isr = I915_READ(GEN8_GT_ISR(2)); 1094 pm_iir = I915_READ(GEN8_GT_IIR(2)); 1095 pm_mask = I915_READ(GEN6_PMINTRMSK); 1096 } 1097 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1098 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); 1099 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1100 seq_printf(m, "Render p-state ratio: %d\n", 1101 (gt_perf_status & 0xff00) >> 8); 1102 seq_printf(m, "Render p-state VID: %d\n", 1103 gt_perf_status & 0xff); 1104 seq_printf(m, "Render p-state limit: %d\n", 1105 rp_state_limits & 0xff); 1106 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 1107 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); 1108 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); 1109 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 1110 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 1111 seq_printf(m, "CAGF: %dMHz\n", cagf); 1112 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 1113 GEN6_CURICONT_MASK); 1114 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 1115 GEN6_CURBSYTAVG_MASK); 1116 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 1117 GEN6_CURBSYTAVG_MASK); 1118 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 1119 GEN6_CURIAVG_MASK); 1120 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 1121 GEN6_CURBSYTAVG_MASK); 1122 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 1123 GEN6_CURBSYTAVG_MASK); 1124 1125 max_freq = (rp_state_cap & 0xff0000) >> 16; 1126 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1127 max_freq * GT_FREQUENCY_MULTIPLIER); 1128 1129 max_freq = (rp_state_cap & 0xff00) >> 8; 1130 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1131 max_freq * GT_FREQUENCY_MULTIPLIER); 1132 1133 max_freq = rp_state_cap & 0xff; 1134 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1135 max_freq * GT_FREQUENCY_MULTIPLIER); 1136 1137 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1138 dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER); 1139 } else if (IS_VALLEYVIEW(dev)) { 1140 u32 freq_sts; 1141 1142 mutex_lock(&dev_priv->rps.hw_lock); 1143 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 1144 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 1145 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 1146 1147 seq_printf(m, "max GPU freq: %d MHz\n", 1148 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1149 1150 seq_printf(m, "min GPU freq: %d MHz\n", 1151 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq)); 1152 1153 seq_printf(m, "efficient (RPe) frequency: %d MHz\n", 1154 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); 1155 1156 seq_printf(m, "current GPU freq: %d MHz\n", 1157 vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); 1158 mutex_unlock(&dev_priv->rps.hw_lock); 1159 } else { 1160 seq_puts(m, "no P-state info available\n"); 1161 } 1162 1163 out: 1164 intel_runtime_pm_put(dev_priv); 1165 return ret; 1166 } 1167 1168 static int ironlake_drpc_info(struct seq_file *m) 1169 { 1170 struct drm_info_node *node = m->private; 1171 struct drm_device *dev = node->minor->dev; 1172 struct drm_i915_private *dev_priv = dev->dev_private; 1173 u32 rgvmodectl, rstdbyctl; 1174 u16 crstandvid; 1175 int ret; 1176 1177 ret = mutex_lock_interruptible(&dev->struct_mutex); 1178 if (ret) 1179 return ret; 1180 intel_runtime_pm_get(dev_priv); 1181 1182 rgvmodectl = I915_READ(MEMMODECTL); 1183 rstdbyctl = I915_READ(RSTDBYCTL); 1184 crstandvid = I915_READ16(CRSTANDVID); 1185 1186 intel_runtime_pm_put(dev_priv); 1187 mutex_unlock(&dev->struct_mutex); 1188 1189 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1190 "yes" : "no"); 1191 seq_printf(m, "Boost freq: %d\n", 1192 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1193 MEMMODE_BOOST_FREQ_SHIFT); 1194 seq_printf(m, "HW control enabled: %s\n", 1195 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 1196 seq_printf(m, "SW control enabled: %s\n", 1197 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 1198 seq_printf(m, "Gated voltage change: %s\n", 1199 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 1200 seq_printf(m, "Starting frequency: P%d\n", 1201 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1202 seq_printf(m, "Max P-state: P%d\n", 1203 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1204 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1205 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1206 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1207 seq_printf(m, "Render standby enabled: %s\n", 1208 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1209 seq_puts(m, "Current RS state: "); 1210 switch (rstdbyctl & RSX_STATUS_MASK) { 1211 case RSX_STATUS_ON: 1212 seq_puts(m, "on\n"); 1213 break; 1214 case RSX_STATUS_RC1: 1215 seq_puts(m, "RC1\n"); 1216 break; 1217 case RSX_STATUS_RC1E: 1218 seq_puts(m, "RC1E\n"); 1219 break; 1220 case RSX_STATUS_RS1: 1221 seq_puts(m, "RS1\n"); 1222 break; 1223 case RSX_STATUS_RS2: 1224 seq_puts(m, "RS2 (RC6)\n"); 1225 break; 1226 case RSX_STATUS_RS3: 1227 seq_puts(m, "RC3 (RC6+)\n"); 1228 break; 1229 default: 1230 seq_puts(m, "unknown\n"); 1231 break; 1232 } 1233 1234 return 0; 1235 } 1236 1237 static int vlv_drpc_info(struct seq_file *m) 1238 { 1239 1240 struct drm_info_node *node = m->private; 1241 struct drm_device *dev = node->minor->dev; 1242 struct drm_i915_private *dev_priv = dev->dev_private; 1243 u32 rpmodectl1, rcctl1, pw_status; 1244 unsigned fw_rendercount = 0, fw_mediacount = 0; 1245 1246 intel_runtime_pm_get(dev_priv); 1247 1248 pw_status = I915_READ(VLV_GTLC_PW_STATUS); 1249 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1250 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1251 1252 intel_runtime_pm_put(dev_priv); 1253 1254 seq_printf(m, "Video Turbo Mode: %s\n", 1255 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1256 seq_printf(m, "Turbo enabled: %s\n", 1257 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1258 seq_printf(m, "HW control enabled: %s\n", 1259 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1260 seq_printf(m, "SW control enabled: %s\n", 1261 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1262 GEN6_RP_MEDIA_SW_MODE)); 1263 seq_printf(m, "RC6 Enabled: %s\n", 1264 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | 1265 GEN6_RC_CTL_EI_MODE(1)))); 1266 seq_printf(m, "Render Power Well: %s\n", 1267 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); 1268 seq_printf(m, "Media Power Well: %s\n", 1269 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down"); 1270 1271 seq_printf(m, "Render RC6 residency since boot: %u\n", 1272 I915_READ(VLV_GT_RENDER_RC6)); 1273 seq_printf(m, "Media RC6 residency since boot: %u\n", 1274 I915_READ(VLV_GT_MEDIA_RC6)); 1275 1276 spin_lock_irq(&dev_priv->uncore.lock); 1277 fw_rendercount = dev_priv->uncore.fw_rendercount; 1278 fw_mediacount = dev_priv->uncore.fw_mediacount; 1279 spin_unlock_irq(&dev_priv->uncore.lock); 1280 1281 seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount); 1282 seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount); 1283 1284 1285 return 0; 1286 } 1287 1288 1289 static int gen6_drpc_info(struct seq_file *m) 1290 { 1291 1292 struct drm_info_node *node = m->private; 1293 struct drm_device *dev = node->minor->dev; 1294 struct drm_i915_private *dev_priv = dev->dev_private; 1295 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1296 unsigned forcewake_count; 1297 int count = 0, ret; 1298 1299 ret = mutex_lock_interruptible(&dev->struct_mutex); 1300 if (ret) 1301 return ret; 1302 intel_runtime_pm_get(dev_priv); 1303 1304 spin_lock_irq(&dev_priv->uncore.lock); 1305 forcewake_count = dev_priv->uncore.forcewake_count; 1306 spin_unlock_irq(&dev_priv->uncore.lock); 1307 1308 if (forcewake_count) { 1309 seq_puts(m, "RC information inaccurate because somebody " 1310 "holds a forcewake reference \n"); 1311 } else { 1312 /* NB: we cannot use forcewake, else we read the wrong values */ 1313 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1314 udelay(10); 1315 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1316 } 1317 1318 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1319 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1320 1321 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1322 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1323 mutex_unlock(&dev->struct_mutex); 1324 mutex_lock(&dev_priv->rps.hw_lock); 1325 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1326 mutex_unlock(&dev_priv->rps.hw_lock); 1327 1328 intel_runtime_pm_put(dev_priv); 1329 1330 seq_printf(m, "Video Turbo Mode: %s\n", 1331 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1332 seq_printf(m, "HW control enabled: %s\n", 1333 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1334 seq_printf(m, "SW control enabled: %s\n", 1335 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1336 GEN6_RP_MEDIA_SW_MODE)); 1337 seq_printf(m, "RC1e Enabled: %s\n", 1338 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1339 seq_printf(m, "RC6 Enabled: %s\n", 1340 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1341 seq_printf(m, "Deep RC6 Enabled: %s\n", 1342 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1343 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1344 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1345 seq_puts(m, "Current RC state: "); 1346 switch (gt_core_status & GEN6_RCn_MASK) { 1347 case GEN6_RC0: 1348 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1349 seq_puts(m, "Core Power Down\n"); 1350 else 1351 seq_puts(m, "on\n"); 1352 break; 1353 case GEN6_RC3: 1354 seq_puts(m, "RC3\n"); 1355 break; 1356 case GEN6_RC6: 1357 seq_puts(m, "RC6\n"); 1358 break; 1359 case GEN6_RC7: 1360 seq_puts(m, "RC7\n"); 1361 break; 1362 default: 1363 seq_puts(m, "Unknown\n"); 1364 break; 1365 } 1366 1367 seq_printf(m, "Core Power Down: %s\n", 1368 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1369 1370 /* Not exactly sure what this is */ 1371 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1372 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1373 seq_printf(m, "RC6 residency since boot: %u\n", 1374 I915_READ(GEN6_GT_GFX_RC6)); 1375 seq_printf(m, "RC6+ residency since boot: %u\n", 1376 I915_READ(GEN6_GT_GFX_RC6p)); 1377 seq_printf(m, "RC6++ residency since boot: %u\n", 1378 I915_READ(GEN6_GT_GFX_RC6pp)); 1379 1380 seq_printf(m, "RC6 voltage: %dmV\n", 1381 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1382 seq_printf(m, "RC6+ voltage: %dmV\n", 1383 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1384 seq_printf(m, "RC6++ voltage: %dmV\n", 1385 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1386 return 0; 1387 } 1388 1389 static int i915_drpc_info(struct seq_file *m, void *unused) 1390 { 1391 struct drm_info_node *node = m->private; 1392 struct drm_device *dev = node->minor->dev; 1393 1394 if (IS_VALLEYVIEW(dev)) 1395 return vlv_drpc_info(m); 1396 else if (INTEL_INFO(dev)->gen >= 6) 1397 return gen6_drpc_info(m); 1398 else 1399 return ironlake_drpc_info(m); 1400 } 1401 1402 static int i915_fbc_status(struct seq_file *m, void *unused) 1403 { 1404 struct drm_info_node *node = m->private; 1405 struct drm_device *dev = node->minor->dev; 1406 struct drm_i915_private *dev_priv = dev->dev_private; 1407 1408 if (!HAS_FBC(dev)) { 1409 seq_puts(m, "FBC unsupported on this chipset\n"); 1410 return 0; 1411 } 1412 1413 intel_runtime_pm_get(dev_priv); 1414 1415 if (intel_fbc_enabled(dev)) { 1416 seq_puts(m, "FBC enabled\n"); 1417 } else { 1418 seq_puts(m, "FBC disabled: "); 1419 switch (dev_priv->fbc.no_fbc_reason) { 1420 case FBC_OK: 1421 seq_puts(m, "FBC actived, but currently disabled in hardware"); 1422 break; 1423 case FBC_UNSUPPORTED: 1424 seq_puts(m, "unsupported by this chipset"); 1425 break; 1426 case FBC_NO_OUTPUT: 1427 seq_puts(m, "no outputs"); 1428 break; 1429 case FBC_STOLEN_TOO_SMALL: 1430 seq_puts(m, "not enough stolen memory"); 1431 break; 1432 case FBC_UNSUPPORTED_MODE: 1433 seq_puts(m, "mode not supported"); 1434 break; 1435 case FBC_MODE_TOO_LARGE: 1436 seq_puts(m, "mode too large"); 1437 break; 1438 case FBC_BAD_PLANE: 1439 seq_puts(m, "FBC unsupported on plane"); 1440 break; 1441 case FBC_NOT_TILED: 1442 seq_puts(m, "scanout buffer not tiled"); 1443 break; 1444 case FBC_MULTIPLE_PIPES: 1445 seq_puts(m, "multiple pipes are enabled"); 1446 break; 1447 case FBC_MODULE_PARAM: 1448 seq_puts(m, "disabled per module param (default off)"); 1449 break; 1450 case FBC_CHIP_DEFAULT: 1451 seq_puts(m, "disabled per chip default"); 1452 break; 1453 default: 1454 seq_puts(m, "unknown reason"); 1455 } 1456 seq_putc(m, '\n'); 1457 } 1458 1459 intel_runtime_pm_put(dev_priv); 1460 1461 return 0; 1462 } 1463 1464 static int i915_fbc_fc_get(void *data, u64 *val) 1465 { 1466 struct drm_device *dev = data; 1467 struct drm_i915_private *dev_priv = dev->dev_private; 1468 1469 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) 1470 return -ENODEV; 1471 1472 drm_modeset_lock_all(dev); 1473 *val = dev_priv->fbc.false_color; 1474 drm_modeset_unlock_all(dev); 1475 1476 return 0; 1477 } 1478 1479 static int i915_fbc_fc_set(void *data, u64 val) 1480 { 1481 struct drm_device *dev = data; 1482 struct drm_i915_private *dev_priv = dev->dev_private; 1483 u32 reg; 1484 1485 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) 1486 return -ENODEV; 1487 1488 drm_modeset_lock_all(dev); 1489 1490 reg = I915_READ(ILK_DPFC_CONTROL); 1491 dev_priv->fbc.false_color = val; 1492 1493 I915_WRITE(ILK_DPFC_CONTROL, val ? 1494 (reg | FBC_CTL_FALSE_COLOR) : 1495 (reg & ~FBC_CTL_FALSE_COLOR)); 1496 1497 drm_modeset_unlock_all(dev); 1498 return 0; 1499 } 1500 1501 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops, 1502 i915_fbc_fc_get, i915_fbc_fc_set, 1503 "%llu\n"); 1504 1505 static int i915_ips_status(struct seq_file *m, void *unused) 1506 { 1507 struct drm_info_node *node = m->private; 1508 struct drm_device *dev = node->minor->dev; 1509 struct drm_i915_private *dev_priv = dev->dev_private; 1510 1511 if (!HAS_IPS(dev)) { 1512 seq_puts(m, "not supported\n"); 1513 return 0; 1514 } 1515 1516 intel_runtime_pm_get(dev_priv); 1517 1518 seq_printf(m, "Enabled by kernel parameter: %s\n", 1519 yesno(i915.enable_ips)); 1520 1521 if (INTEL_INFO(dev)->gen >= 8) { 1522 seq_puts(m, "Currently: unknown\n"); 1523 } else { 1524 if (I915_READ(IPS_CTL) & IPS_ENABLE) 1525 seq_puts(m, "Currently: enabled\n"); 1526 else 1527 seq_puts(m, "Currently: disabled\n"); 1528 } 1529 1530 intel_runtime_pm_put(dev_priv); 1531 1532 return 0; 1533 } 1534 1535 static int i915_sr_status(struct seq_file *m, void *unused) 1536 { 1537 struct drm_info_node *node = m->private; 1538 struct drm_device *dev = node->minor->dev; 1539 struct drm_i915_private *dev_priv = dev->dev_private; 1540 bool sr_enabled = false; 1541 1542 intel_runtime_pm_get(dev_priv); 1543 1544 if (HAS_PCH_SPLIT(dev)) 1545 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1546 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1547 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1548 else if (IS_I915GM(dev)) 1549 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1550 else if (IS_PINEVIEW(dev)) 1551 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1552 1553 intel_runtime_pm_put(dev_priv); 1554 1555 seq_printf(m, "self-refresh: %s\n", 1556 sr_enabled ? "enabled" : "disabled"); 1557 1558 return 0; 1559 } 1560 1561 static int i915_emon_status(struct seq_file *m, void *unused) 1562 { 1563 struct drm_info_node *node = m->private; 1564 struct drm_device *dev = node->minor->dev; 1565 struct drm_i915_private *dev_priv = dev->dev_private; 1566 unsigned long temp, chipset, gfx; 1567 int ret; 1568 1569 if (!IS_GEN5(dev)) 1570 return -ENODEV; 1571 1572 ret = mutex_lock_interruptible(&dev->struct_mutex); 1573 if (ret) 1574 return ret; 1575 1576 temp = i915_mch_val(dev_priv); 1577 chipset = i915_chipset_val(dev_priv); 1578 gfx = i915_gfx_val(dev_priv); 1579 mutex_unlock(&dev->struct_mutex); 1580 1581 seq_printf(m, "GMCH temp: %ld\n", temp); 1582 seq_printf(m, "Chipset power: %ld\n", chipset); 1583 seq_printf(m, "GFX power: %ld\n", gfx); 1584 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1585 1586 return 0; 1587 } 1588 1589 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1590 { 1591 struct drm_info_node *node = m->private; 1592 struct drm_device *dev = node->minor->dev; 1593 struct drm_i915_private *dev_priv = dev->dev_private; 1594 int ret = 0; 1595 int gpu_freq, ia_freq; 1596 1597 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1598 seq_puts(m, "unsupported on this chipset\n"); 1599 return 0; 1600 } 1601 1602 intel_runtime_pm_get(dev_priv); 1603 1604 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1605 1606 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1607 if (ret) 1608 goto out; 1609 1610 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1611 1612 for (gpu_freq = dev_priv->rps.min_freq_softlimit; 1613 gpu_freq <= dev_priv->rps.max_freq_softlimit; 1614 gpu_freq++) { 1615 ia_freq = gpu_freq; 1616 sandybridge_pcode_read(dev_priv, 1617 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1618 &ia_freq); 1619 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1620 gpu_freq * GT_FREQUENCY_MULTIPLIER, 1621 ((ia_freq >> 0) & 0xff) * 100, 1622 ((ia_freq >> 8) & 0xff) * 100); 1623 } 1624 1625 mutex_unlock(&dev_priv->rps.hw_lock); 1626 1627 out: 1628 intel_runtime_pm_put(dev_priv); 1629 return ret; 1630 } 1631 1632 static int i915_opregion(struct seq_file *m, void *unused) 1633 { 1634 struct drm_info_node *node = m->private; 1635 struct drm_device *dev = node->minor->dev; 1636 struct drm_i915_private *dev_priv = dev->dev_private; 1637 struct intel_opregion *opregion = &dev_priv->opregion; 1638 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); 1639 int ret; 1640 1641 if (data == NULL) 1642 return -ENOMEM; 1643 1644 ret = mutex_lock_interruptible(&dev->struct_mutex); 1645 if (ret) 1646 goto out; 1647 1648 if (opregion->header) { 1649 memcpy_fromio(data, opregion->header, OPREGION_SIZE); 1650 seq_write(m, data, OPREGION_SIZE); 1651 } 1652 1653 mutex_unlock(&dev->struct_mutex); 1654 1655 out: 1656 kfree(data); 1657 return 0; 1658 } 1659 1660 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1661 { 1662 struct drm_info_node *node = m->private; 1663 struct drm_device *dev = node->minor->dev; 1664 struct intel_fbdev *ifbdev = NULL; 1665 struct intel_framebuffer *fb; 1666 1667 #ifdef CONFIG_DRM_I915_FBDEV 1668 struct drm_i915_private *dev_priv = dev->dev_private; 1669 1670 ifbdev = dev_priv->fbdev; 1671 fb = to_intel_framebuffer(ifbdev->helper.fb); 1672 1673 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1674 fb->base.width, 1675 fb->base.height, 1676 fb->base.depth, 1677 fb->base.bits_per_pixel, 1678 atomic_read(&fb->base.refcount.refcount)); 1679 describe_obj(m, fb->obj); 1680 seq_putc(m, '\n'); 1681 #endif 1682 1683 mutex_lock(&dev->mode_config.fb_lock); 1684 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1685 if (ifbdev && &fb->base == ifbdev->helper.fb) 1686 continue; 1687 1688 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1689 fb->base.width, 1690 fb->base.height, 1691 fb->base.depth, 1692 fb->base.bits_per_pixel, 1693 atomic_read(&fb->base.refcount.refcount)); 1694 describe_obj(m, fb->obj); 1695 seq_putc(m, '\n'); 1696 } 1697 mutex_unlock(&dev->mode_config.fb_lock); 1698 1699 return 0; 1700 } 1701 1702 static void describe_ctx_ringbuf(struct seq_file *m, 1703 struct intel_ringbuffer *ringbuf) 1704 { 1705 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)", 1706 ringbuf->space, ringbuf->head, ringbuf->tail, 1707 ringbuf->last_retired_head); 1708 } 1709 1710 static int i915_context_status(struct seq_file *m, void *unused) 1711 { 1712 struct drm_info_node *node = m->private; 1713 struct drm_device *dev = node->minor->dev; 1714 struct drm_i915_private *dev_priv = dev->dev_private; 1715 struct intel_engine_cs *ring; 1716 struct intel_context *ctx; 1717 int ret, i; 1718 1719 ret = mutex_lock_interruptible(&dev->struct_mutex); 1720 if (ret) 1721 return ret; 1722 1723 if (dev_priv->ips.pwrctx) { 1724 seq_puts(m, "power context "); 1725 describe_obj(m, dev_priv->ips.pwrctx); 1726 seq_putc(m, '\n'); 1727 } 1728 1729 if (dev_priv->ips.renderctx) { 1730 seq_puts(m, "render context "); 1731 describe_obj(m, dev_priv->ips.renderctx); 1732 seq_putc(m, '\n'); 1733 } 1734 1735 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1736 if (!i915.enable_execlists && 1737 ctx->legacy_hw_ctx.rcs_state == NULL) 1738 continue; 1739 1740 seq_puts(m, "HW context "); 1741 describe_ctx(m, ctx); 1742 for_each_ring(ring, dev_priv, i) { 1743 if (ring->default_context == ctx) 1744 seq_printf(m, "(default context %s) ", 1745 ring->name); 1746 } 1747 1748 if (i915.enable_execlists) { 1749 seq_putc(m, '\n'); 1750 for_each_ring(ring, dev_priv, i) { 1751 struct drm_i915_gem_object *ctx_obj = 1752 ctx->engine[i].state; 1753 struct intel_ringbuffer *ringbuf = 1754 ctx->engine[i].ringbuf; 1755 1756 seq_printf(m, "%s: ", ring->name); 1757 if (ctx_obj) 1758 describe_obj(m, ctx_obj); 1759 if (ringbuf) 1760 describe_ctx_ringbuf(m, ringbuf); 1761 seq_putc(m, '\n'); 1762 } 1763 } else { 1764 describe_obj(m, ctx->legacy_hw_ctx.rcs_state); 1765 } 1766 1767 seq_putc(m, '\n'); 1768 } 1769 1770 mutex_unlock(&dev->struct_mutex); 1771 1772 return 0; 1773 } 1774 1775 static void i915_dump_lrc_obj(struct seq_file *m, 1776 struct intel_engine_cs *ring, 1777 struct drm_i915_gem_object *ctx_obj) 1778 { 1779 struct page *page; 1780 uint32_t *reg_state; 1781 int j; 1782 unsigned long ggtt_offset = 0; 1783 1784 if (ctx_obj == NULL) { 1785 seq_printf(m, "Context on %s with no gem object\n", 1786 ring->name); 1787 return; 1788 } 1789 1790 seq_printf(m, "CONTEXT: %s %u\n", ring->name, 1791 intel_execlists_ctx_id(ctx_obj)); 1792 1793 if (!i915_gem_obj_ggtt_bound(ctx_obj)) 1794 seq_puts(m, "\tNot bound in GGTT\n"); 1795 else 1796 ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj); 1797 1798 if (i915_gem_object_get_pages(ctx_obj)) { 1799 seq_puts(m, "\tFailed to get pages for context object\n"); 1800 return; 1801 } 1802 1803 page = i915_gem_object_get_page(ctx_obj, 1); 1804 if (!WARN_ON(page == NULL)) { 1805 reg_state = kmap_atomic(page); 1806 1807 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { 1808 seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", 1809 ggtt_offset + 4096 + (j * 4), 1810 reg_state[j], reg_state[j + 1], 1811 reg_state[j + 2], reg_state[j + 3]); 1812 } 1813 kunmap_atomic(reg_state); 1814 } 1815 1816 seq_putc(m, '\n'); 1817 } 1818 1819 static int i915_dump_lrc(struct seq_file *m, void *unused) 1820 { 1821 struct drm_info_node *node = (struct drm_info_node *) m->private; 1822 struct drm_device *dev = node->minor->dev; 1823 struct drm_i915_private *dev_priv = dev->dev_private; 1824 struct intel_engine_cs *ring; 1825 struct intel_context *ctx; 1826 int ret, i; 1827 1828 if (!i915.enable_execlists) { 1829 seq_printf(m, "Logical Ring Contexts are disabled\n"); 1830 return 0; 1831 } 1832 1833 ret = mutex_lock_interruptible(&dev->struct_mutex); 1834 if (ret) 1835 return ret; 1836 1837 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1838 for_each_ring(ring, dev_priv, i) { 1839 if (ring->default_context != ctx) 1840 i915_dump_lrc_obj(m, ring, 1841 ctx->engine[i].state); 1842 } 1843 } 1844 1845 mutex_unlock(&dev->struct_mutex); 1846 1847 return 0; 1848 } 1849 1850 static int i915_execlists(struct seq_file *m, void *data) 1851 { 1852 struct drm_info_node *node = (struct drm_info_node *)m->private; 1853 struct drm_device *dev = node->minor->dev; 1854 struct drm_i915_private *dev_priv = dev->dev_private; 1855 struct intel_engine_cs *ring; 1856 u32 status_pointer; 1857 u8 read_pointer; 1858 u8 write_pointer; 1859 u32 status; 1860 u32 ctx_id; 1861 struct list_head *cursor; 1862 int ring_id, i; 1863 int ret; 1864 1865 if (!i915.enable_execlists) { 1866 seq_puts(m, "Logical Ring Contexts are disabled\n"); 1867 return 0; 1868 } 1869 1870 ret = mutex_lock_interruptible(&dev->struct_mutex); 1871 if (ret) 1872 return ret; 1873 1874 intel_runtime_pm_get(dev_priv); 1875 1876 for_each_ring(ring, dev_priv, ring_id) { 1877 struct intel_ctx_submit_request *head_req = NULL; 1878 int count = 0; 1879 unsigned long flags; 1880 1881 seq_printf(m, "%s\n", ring->name); 1882 1883 status = I915_READ(RING_EXECLIST_STATUS(ring)); 1884 ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4); 1885 seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n", 1886 status, ctx_id); 1887 1888 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); 1889 seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer); 1890 1891 read_pointer = ring->next_context_status_buffer; 1892 write_pointer = status_pointer & 0x07; 1893 if (read_pointer > write_pointer) 1894 write_pointer += 6; 1895 seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n", 1896 read_pointer, write_pointer); 1897 1898 for (i = 0; i < 6; i++) { 1899 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i); 1900 ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4); 1901 1902 seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n", 1903 i, status, ctx_id); 1904 } 1905 1906 spin_lock_irqsave(&ring->execlist_lock, flags); 1907 list_for_each(cursor, &ring->execlist_queue) 1908 count++; 1909 head_req = list_first_entry_or_null(&ring->execlist_queue, 1910 struct intel_ctx_submit_request, execlist_link); 1911 spin_unlock_irqrestore(&ring->execlist_lock, flags); 1912 1913 seq_printf(m, "\t%d requests in queue\n", count); 1914 if (head_req) { 1915 struct drm_i915_gem_object *ctx_obj; 1916 1917 ctx_obj = head_req->ctx->engine[ring_id].state; 1918 seq_printf(m, "\tHead request id: %u\n", 1919 intel_execlists_ctx_id(ctx_obj)); 1920 seq_printf(m, "\tHead request tail: %u\n", 1921 head_req->tail); 1922 } 1923 1924 seq_putc(m, '\n'); 1925 } 1926 1927 intel_runtime_pm_put(dev_priv); 1928 mutex_unlock(&dev->struct_mutex); 1929 1930 return 0; 1931 } 1932 1933 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) 1934 { 1935 struct drm_info_node *node = m->private; 1936 struct drm_device *dev = node->minor->dev; 1937 struct drm_i915_private *dev_priv = dev->dev_private; 1938 unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0; 1939 1940 spin_lock_irq(&dev_priv->uncore.lock); 1941 if (IS_VALLEYVIEW(dev)) { 1942 fw_rendercount = dev_priv->uncore.fw_rendercount; 1943 fw_mediacount = dev_priv->uncore.fw_mediacount; 1944 } else 1945 forcewake_count = dev_priv->uncore.forcewake_count; 1946 spin_unlock_irq(&dev_priv->uncore.lock); 1947 1948 if (IS_VALLEYVIEW(dev)) { 1949 seq_printf(m, "fw_rendercount = %u\n", fw_rendercount); 1950 seq_printf(m, "fw_mediacount = %u\n", fw_mediacount); 1951 } else 1952 seq_printf(m, "forcewake count = %u\n", forcewake_count); 1953 1954 return 0; 1955 } 1956 1957 static const char *swizzle_string(unsigned swizzle) 1958 { 1959 switch (swizzle) { 1960 case I915_BIT_6_SWIZZLE_NONE: 1961 return "none"; 1962 case I915_BIT_6_SWIZZLE_9: 1963 return "bit9"; 1964 case I915_BIT_6_SWIZZLE_9_10: 1965 return "bit9/bit10"; 1966 case I915_BIT_6_SWIZZLE_9_11: 1967 return "bit9/bit11"; 1968 case I915_BIT_6_SWIZZLE_9_10_11: 1969 return "bit9/bit10/bit11"; 1970 case I915_BIT_6_SWIZZLE_9_17: 1971 return "bit9/bit17"; 1972 case I915_BIT_6_SWIZZLE_9_10_17: 1973 return "bit9/bit10/bit17"; 1974 case I915_BIT_6_SWIZZLE_UNKNOWN: 1975 return "unknown"; 1976 } 1977 1978 return "bug"; 1979 } 1980 1981 static int i915_swizzle_info(struct seq_file *m, void *data) 1982 { 1983 struct drm_info_node *node = m->private; 1984 struct drm_device *dev = node->minor->dev; 1985 struct drm_i915_private *dev_priv = dev->dev_private; 1986 int ret; 1987 1988 ret = mutex_lock_interruptible(&dev->struct_mutex); 1989 if (ret) 1990 return ret; 1991 intel_runtime_pm_get(dev_priv); 1992 1993 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1994 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1995 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1996 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1997 1998 if (IS_GEN3(dev) || IS_GEN4(dev)) { 1999 seq_printf(m, "DDC = 0x%08x\n", 2000 I915_READ(DCC)); 2001 seq_printf(m, "DDC2 = 0x%08x\n", 2002 I915_READ(DCC2)); 2003 seq_printf(m, "C0DRB3 = 0x%04x\n", 2004 I915_READ16(C0DRB3)); 2005 seq_printf(m, "C1DRB3 = 0x%04x\n", 2006 I915_READ16(C1DRB3)); 2007 } else if (INTEL_INFO(dev)->gen >= 6) { 2008 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 2009 I915_READ(MAD_DIMM_C0)); 2010 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 2011 I915_READ(MAD_DIMM_C1)); 2012 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 2013 I915_READ(MAD_DIMM_C2)); 2014 seq_printf(m, "TILECTL = 0x%08x\n", 2015 I915_READ(TILECTL)); 2016 if (INTEL_INFO(dev)->gen >= 8) 2017 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 2018 I915_READ(GAMTARBMODE)); 2019 else 2020 seq_printf(m, "ARB_MODE = 0x%08x\n", 2021 I915_READ(ARB_MODE)); 2022 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 2023 I915_READ(DISP_ARB_CTL)); 2024 } 2025 2026 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2027 seq_puts(m, "L-shaped memory detected\n"); 2028 2029 intel_runtime_pm_put(dev_priv); 2030 mutex_unlock(&dev->struct_mutex); 2031 2032 return 0; 2033 } 2034 2035 static int per_file_ctx(int id, void *ptr, void *data) 2036 { 2037 struct intel_context *ctx = ptr; 2038 struct seq_file *m = data; 2039 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2040 2041 if (!ppgtt) { 2042 seq_printf(m, " no ppgtt for context %d\n", 2043 ctx->user_handle); 2044 return 0; 2045 } 2046 2047 if (i915_gem_context_is_default(ctx)) 2048 seq_puts(m, " default context:\n"); 2049 else 2050 seq_printf(m, " context %d:\n", ctx->user_handle); 2051 ppgtt->debug_dump(ppgtt, m); 2052 2053 return 0; 2054 } 2055 2056 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2057 { 2058 struct drm_i915_private *dev_priv = dev->dev_private; 2059 struct intel_engine_cs *ring; 2060 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2061 int unused, i; 2062 2063 if (!ppgtt) 2064 return; 2065 2066 seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages); 2067 seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries); 2068 for_each_ring(ring, dev_priv, unused) { 2069 seq_printf(m, "%s\n", ring->name); 2070 for (i = 0; i < 4; i++) { 2071 u32 offset = 0x270 + i * 8; 2072 u64 pdp = I915_READ(ring->mmio_base + offset + 4); 2073 pdp <<= 32; 2074 pdp |= I915_READ(ring->mmio_base + offset); 2075 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2076 } 2077 } 2078 } 2079 2080 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2081 { 2082 struct drm_i915_private *dev_priv = dev->dev_private; 2083 struct intel_engine_cs *ring; 2084 struct drm_file *file; 2085 int i; 2086 2087 if (INTEL_INFO(dev)->gen == 6) 2088 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2089 2090 for_each_ring(ring, dev_priv, i) { 2091 seq_printf(m, "%s\n", ring->name); 2092 if (INTEL_INFO(dev)->gen == 7) 2093 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 2094 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 2095 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 2096 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 2097 } 2098 if (dev_priv->mm.aliasing_ppgtt) { 2099 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2100 2101 seq_puts(m, "aliasing PPGTT:\n"); 2102 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 2103 2104 ppgtt->debug_dump(ppgtt, m); 2105 } 2106 2107 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2108 struct drm_i915_file_private *file_priv = file->driver_priv; 2109 2110 seq_printf(m, "proc: %s\n", 2111 get_pid_task(file->pid, PIDTYPE_PID)->comm); 2112 idr_for_each(&file_priv->context_idr, per_file_ctx, m); 2113 } 2114 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2115 } 2116 2117 static int i915_ppgtt_info(struct seq_file *m, void *data) 2118 { 2119 struct drm_info_node *node = m->private; 2120 struct drm_device *dev = node->minor->dev; 2121 struct drm_i915_private *dev_priv = dev->dev_private; 2122 2123 int ret = mutex_lock_interruptible(&dev->struct_mutex); 2124 if (ret) 2125 return ret; 2126 intel_runtime_pm_get(dev_priv); 2127 2128 if (INTEL_INFO(dev)->gen >= 8) 2129 gen8_ppgtt_info(m, dev); 2130 else if (INTEL_INFO(dev)->gen >= 6) 2131 gen6_ppgtt_info(m, dev); 2132 2133 intel_runtime_pm_put(dev_priv); 2134 mutex_unlock(&dev->struct_mutex); 2135 2136 return 0; 2137 } 2138 2139 static int i915_llc(struct seq_file *m, void *data) 2140 { 2141 struct drm_info_node *node = m->private; 2142 struct drm_device *dev = node->minor->dev; 2143 struct drm_i915_private *dev_priv = dev->dev_private; 2144 2145 /* Size calculation for LLC is a bit of a pain. Ignore for now. */ 2146 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); 2147 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); 2148 2149 return 0; 2150 } 2151 2152 static int i915_edp_psr_status(struct seq_file *m, void *data) 2153 { 2154 struct drm_info_node *node = m->private; 2155 struct drm_device *dev = node->minor->dev; 2156 struct drm_i915_private *dev_priv = dev->dev_private; 2157 u32 psrperf = 0; 2158 bool enabled = false; 2159 2160 intel_runtime_pm_get(dev_priv); 2161 2162 mutex_lock(&dev_priv->psr.lock); 2163 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 2164 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 2165 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2166 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2167 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2168 dev_priv->psr.busy_frontbuffer_bits); 2169 seq_printf(m, "Re-enable work scheduled: %s\n", 2170 yesno(work_busy(&dev_priv->psr.work.work))); 2171 2172 enabled = HAS_PSR(dev) && 2173 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; 2174 seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled)); 2175 2176 if (HAS_PSR(dev)) 2177 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & 2178 EDP_PSR_PERF_CNT_MASK; 2179 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2180 mutex_unlock(&dev_priv->psr.lock); 2181 2182 intel_runtime_pm_put(dev_priv); 2183 return 0; 2184 } 2185 2186 static int i915_sink_crc(struct seq_file *m, void *data) 2187 { 2188 struct drm_info_node *node = m->private; 2189 struct drm_device *dev = node->minor->dev; 2190 struct intel_encoder *encoder; 2191 struct intel_connector *connector; 2192 struct intel_dp *intel_dp = NULL; 2193 int ret; 2194 u8 crc[6]; 2195 2196 drm_modeset_lock_all(dev); 2197 list_for_each_entry(connector, &dev->mode_config.connector_list, 2198 base.head) { 2199 2200 if (connector->base.dpms != DRM_MODE_DPMS_ON) 2201 continue; 2202 2203 if (!connector->base.encoder) 2204 continue; 2205 2206 encoder = to_intel_encoder(connector->base.encoder); 2207 if (encoder->type != INTEL_OUTPUT_EDP) 2208 continue; 2209 2210 intel_dp = enc_to_intel_dp(&encoder->base); 2211 2212 ret = intel_dp_sink_crc(intel_dp, crc); 2213 if (ret) 2214 goto out; 2215 2216 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2217 crc[0], crc[1], crc[2], 2218 crc[3], crc[4], crc[5]); 2219 goto out; 2220 } 2221 ret = -ENODEV; 2222 out: 2223 drm_modeset_unlock_all(dev); 2224 return ret; 2225 } 2226 2227 static int i915_energy_uJ(struct seq_file *m, void *data) 2228 { 2229 struct drm_info_node *node = m->private; 2230 struct drm_device *dev = node->minor->dev; 2231 struct drm_i915_private *dev_priv = dev->dev_private; 2232 u64 power; 2233 u32 units; 2234 2235 if (INTEL_INFO(dev)->gen < 6) 2236 return -ENODEV; 2237 2238 intel_runtime_pm_get(dev_priv); 2239 2240 rdmsrl(MSR_RAPL_POWER_UNIT, power); 2241 power = (power & 0x1f00) >> 8; 2242 units = 1000000 / (1 << power); /* convert to uJ */ 2243 power = I915_READ(MCH_SECP_NRG_STTS); 2244 power *= units; 2245 2246 intel_runtime_pm_put(dev_priv); 2247 2248 seq_printf(m, "%llu", (long long unsigned)power); 2249 2250 return 0; 2251 } 2252 2253 static int i915_pc8_status(struct seq_file *m, void *unused) 2254 { 2255 struct drm_info_node *node = m->private; 2256 struct drm_device *dev = node->minor->dev; 2257 struct drm_i915_private *dev_priv = dev->dev_private; 2258 2259 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { 2260 seq_puts(m, "not supported\n"); 2261 return 0; 2262 } 2263 2264 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); 2265 seq_printf(m, "IRQs disabled: %s\n", 2266 yesno(!intel_irqs_enabled(dev_priv))); 2267 2268 return 0; 2269 } 2270 2271 static const char *power_domain_str(enum intel_display_power_domain domain) 2272 { 2273 switch (domain) { 2274 case POWER_DOMAIN_PIPE_A: 2275 return "PIPE_A"; 2276 case POWER_DOMAIN_PIPE_B: 2277 return "PIPE_B"; 2278 case POWER_DOMAIN_PIPE_C: 2279 return "PIPE_C"; 2280 case POWER_DOMAIN_PIPE_A_PANEL_FITTER: 2281 return "PIPE_A_PANEL_FITTER"; 2282 case POWER_DOMAIN_PIPE_B_PANEL_FITTER: 2283 return "PIPE_B_PANEL_FITTER"; 2284 case POWER_DOMAIN_PIPE_C_PANEL_FITTER: 2285 return "PIPE_C_PANEL_FITTER"; 2286 case POWER_DOMAIN_TRANSCODER_A: 2287 return "TRANSCODER_A"; 2288 case POWER_DOMAIN_TRANSCODER_B: 2289 return "TRANSCODER_B"; 2290 case POWER_DOMAIN_TRANSCODER_C: 2291 return "TRANSCODER_C"; 2292 case POWER_DOMAIN_TRANSCODER_EDP: 2293 return "TRANSCODER_EDP"; 2294 case POWER_DOMAIN_PORT_DDI_A_2_LANES: 2295 return "PORT_DDI_A_2_LANES"; 2296 case POWER_DOMAIN_PORT_DDI_A_4_LANES: 2297 return "PORT_DDI_A_4_LANES"; 2298 case POWER_DOMAIN_PORT_DDI_B_2_LANES: 2299 return "PORT_DDI_B_2_LANES"; 2300 case POWER_DOMAIN_PORT_DDI_B_4_LANES: 2301 return "PORT_DDI_B_4_LANES"; 2302 case POWER_DOMAIN_PORT_DDI_C_2_LANES: 2303 return "PORT_DDI_C_2_LANES"; 2304 case POWER_DOMAIN_PORT_DDI_C_4_LANES: 2305 return "PORT_DDI_C_4_LANES"; 2306 case POWER_DOMAIN_PORT_DDI_D_2_LANES: 2307 return "PORT_DDI_D_2_LANES"; 2308 case POWER_DOMAIN_PORT_DDI_D_4_LANES: 2309 return "PORT_DDI_D_4_LANES"; 2310 case POWER_DOMAIN_PORT_DSI: 2311 return "PORT_DSI"; 2312 case POWER_DOMAIN_PORT_CRT: 2313 return "PORT_CRT"; 2314 case POWER_DOMAIN_PORT_OTHER: 2315 return "PORT_OTHER"; 2316 case POWER_DOMAIN_VGA: 2317 return "VGA"; 2318 case POWER_DOMAIN_AUDIO: 2319 return "AUDIO"; 2320 case POWER_DOMAIN_PLLS: 2321 return "PLLS"; 2322 case POWER_DOMAIN_INIT: 2323 return "INIT"; 2324 default: 2325 WARN_ON(1); 2326 return "?"; 2327 } 2328 } 2329 2330 static int i915_power_domain_info(struct seq_file *m, void *unused) 2331 { 2332 struct drm_info_node *node = m->private; 2333 struct drm_device *dev = node->minor->dev; 2334 struct drm_i915_private *dev_priv = dev->dev_private; 2335 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2336 int i; 2337 2338 mutex_lock(&power_domains->lock); 2339 2340 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2341 for (i = 0; i < power_domains->power_well_count; i++) { 2342 struct i915_power_well *power_well; 2343 enum intel_display_power_domain power_domain; 2344 2345 power_well = &power_domains->power_wells[i]; 2346 seq_printf(m, "%-25s %d\n", power_well->name, 2347 power_well->count); 2348 2349 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM; 2350 power_domain++) { 2351 if (!(BIT(power_domain) & power_well->domains)) 2352 continue; 2353 2354 seq_printf(m, " %-23s %d\n", 2355 power_domain_str(power_domain), 2356 power_domains->domain_use_count[power_domain]); 2357 } 2358 } 2359 2360 mutex_unlock(&power_domains->lock); 2361 2362 return 0; 2363 } 2364 2365 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2366 struct drm_display_mode *mode) 2367 { 2368 int i; 2369 2370 for (i = 0; i < tabs; i++) 2371 seq_putc(m, '\t'); 2372 2373 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2374 mode->base.id, mode->name, 2375 mode->vrefresh, mode->clock, 2376 mode->hdisplay, mode->hsync_start, 2377 mode->hsync_end, mode->htotal, 2378 mode->vdisplay, mode->vsync_start, 2379 mode->vsync_end, mode->vtotal, 2380 mode->type, mode->flags); 2381 } 2382 2383 static void intel_encoder_info(struct seq_file *m, 2384 struct intel_crtc *intel_crtc, 2385 struct intel_encoder *intel_encoder) 2386 { 2387 struct drm_info_node *node = m->private; 2388 struct drm_device *dev = node->minor->dev; 2389 struct drm_crtc *crtc = &intel_crtc->base; 2390 struct intel_connector *intel_connector; 2391 struct drm_encoder *encoder; 2392 2393 encoder = &intel_encoder->base; 2394 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2395 encoder->base.id, encoder->name); 2396 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2397 struct drm_connector *connector = &intel_connector->base; 2398 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2399 connector->base.id, 2400 connector->name, 2401 drm_get_connector_status_name(connector->status)); 2402 if (connector->status == connector_status_connected) { 2403 struct drm_display_mode *mode = &crtc->mode; 2404 seq_printf(m, ", mode:\n"); 2405 intel_seq_print_mode(m, 2, mode); 2406 } else { 2407 seq_putc(m, '\n'); 2408 } 2409 } 2410 } 2411 2412 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2413 { 2414 struct drm_info_node *node = m->private; 2415 struct drm_device *dev = node->minor->dev; 2416 struct drm_crtc *crtc = &intel_crtc->base; 2417 struct intel_encoder *intel_encoder; 2418 2419 if (crtc->primary->fb) 2420 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2421 crtc->primary->fb->base.id, crtc->x, crtc->y, 2422 crtc->primary->fb->width, crtc->primary->fb->height); 2423 else 2424 seq_puts(m, "\tprimary plane disabled\n"); 2425 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2426 intel_encoder_info(m, intel_crtc, intel_encoder); 2427 } 2428 2429 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2430 { 2431 struct drm_display_mode *mode = panel->fixed_mode; 2432 2433 seq_printf(m, "\tfixed mode:\n"); 2434 intel_seq_print_mode(m, 2, mode); 2435 } 2436 2437 static void intel_dp_info(struct seq_file *m, 2438 struct intel_connector *intel_connector) 2439 { 2440 struct intel_encoder *intel_encoder = intel_connector->encoder; 2441 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2442 2443 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2444 seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" : 2445 "no"); 2446 if (intel_encoder->type == INTEL_OUTPUT_EDP) 2447 intel_panel_info(m, &intel_connector->panel); 2448 } 2449 2450 static void intel_hdmi_info(struct seq_file *m, 2451 struct intel_connector *intel_connector) 2452 { 2453 struct intel_encoder *intel_encoder = intel_connector->encoder; 2454 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2455 2456 seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" : 2457 "no"); 2458 } 2459 2460 static void intel_lvds_info(struct seq_file *m, 2461 struct intel_connector *intel_connector) 2462 { 2463 intel_panel_info(m, &intel_connector->panel); 2464 } 2465 2466 static void intel_connector_info(struct seq_file *m, 2467 struct drm_connector *connector) 2468 { 2469 struct intel_connector *intel_connector = to_intel_connector(connector); 2470 struct intel_encoder *intel_encoder = intel_connector->encoder; 2471 struct drm_display_mode *mode; 2472 2473 seq_printf(m, "connector %d: type %s, status: %s\n", 2474 connector->base.id, connector->name, 2475 drm_get_connector_status_name(connector->status)); 2476 if (connector->status == connector_status_connected) { 2477 seq_printf(m, "\tname: %s\n", connector->display_info.name); 2478 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2479 connector->display_info.width_mm, 2480 connector->display_info.height_mm); 2481 seq_printf(m, "\tsubpixel order: %s\n", 2482 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2483 seq_printf(m, "\tCEA rev: %d\n", 2484 connector->display_info.cea_rev); 2485 } 2486 if (intel_encoder) { 2487 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2488 intel_encoder->type == INTEL_OUTPUT_EDP) 2489 intel_dp_info(m, intel_connector); 2490 else if (intel_encoder->type == INTEL_OUTPUT_HDMI) 2491 intel_hdmi_info(m, intel_connector); 2492 else if (intel_encoder->type == INTEL_OUTPUT_LVDS) 2493 intel_lvds_info(m, intel_connector); 2494 } 2495 2496 seq_printf(m, "\tmodes:\n"); 2497 list_for_each_entry(mode, &connector->modes, head) 2498 intel_seq_print_mode(m, 2, mode); 2499 } 2500 2501 static bool cursor_active(struct drm_device *dev, int pipe) 2502 { 2503 struct drm_i915_private *dev_priv = dev->dev_private; 2504 u32 state; 2505 2506 if (IS_845G(dev) || IS_I865G(dev)) 2507 state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 2508 else 2509 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 2510 2511 return state; 2512 } 2513 2514 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) 2515 { 2516 struct drm_i915_private *dev_priv = dev->dev_private; 2517 u32 pos; 2518 2519 pos = I915_READ(CURPOS(pipe)); 2520 2521 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; 2522 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) 2523 *x = -*x; 2524 2525 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; 2526 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) 2527 *y = -*y; 2528 2529 return cursor_active(dev, pipe); 2530 } 2531 2532 static int i915_display_info(struct seq_file *m, void *unused) 2533 { 2534 struct drm_info_node *node = m->private; 2535 struct drm_device *dev = node->minor->dev; 2536 struct drm_i915_private *dev_priv = dev->dev_private; 2537 struct intel_crtc *crtc; 2538 struct drm_connector *connector; 2539 2540 intel_runtime_pm_get(dev_priv); 2541 drm_modeset_lock_all(dev); 2542 seq_printf(m, "CRTC info\n"); 2543 seq_printf(m, "---------\n"); 2544 for_each_intel_crtc(dev, crtc) { 2545 bool active; 2546 int x, y; 2547 2548 seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", 2549 crtc->base.base.id, pipe_name(crtc->pipe), 2550 yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h); 2551 if (crtc->active) { 2552 intel_crtc_info(m, crtc); 2553 2554 active = cursor_position(dev, crtc->pipe, &x, &y); 2555 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n", 2556 yesno(crtc->cursor_base), 2557 x, y, crtc->cursor_width, crtc->cursor_height, 2558 crtc->cursor_addr, yesno(active)); 2559 } 2560 2561 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 2562 yesno(!crtc->cpu_fifo_underrun_disabled), 2563 yesno(!crtc->pch_fifo_underrun_disabled)); 2564 } 2565 2566 seq_printf(m, "\n"); 2567 seq_printf(m, "Connector info\n"); 2568 seq_printf(m, "--------------\n"); 2569 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2570 intel_connector_info(m, connector); 2571 } 2572 drm_modeset_unlock_all(dev); 2573 intel_runtime_pm_put(dev_priv); 2574 2575 return 0; 2576 } 2577 2578 static int i915_semaphore_status(struct seq_file *m, void *unused) 2579 { 2580 struct drm_info_node *node = (struct drm_info_node *) m->private; 2581 struct drm_device *dev = node->minor->dev; 2582 struct drm_i915_private *dev_priv = dev->dev_private; 2583 struct intel_engine_cs *ring; 2584 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 2585 int i, j, ret; 2586 2587 if (!i915_semaphore_is_enabled(dev)) { 2588 seq_puts(m, "Semaphores are disabled\n"); 2589 return 0; 2590 } 2591 2592 ret = mutex_lock_interruptible(&dev->struct_mutex); 2593 if (ret) 2594 return ret; 2595 intel_runtime_pm_get(dev_priv); 2596 2597 if (IS_BROADWELL(dev)) { 2598 struct page *page; 2599 uint64_t *seqno; 2600 2601 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); 2602 2603 seqno = (uint64_t *)kmap_atomic(page); 2604 for_each_ring(ring, dev_priv, i) { 2605 uint64_t offset; 2606 2607 seq_printf(m, "%s\n", ring->name); 2608 2609 seq_puts(m, " Last signal:"); 2610 for (j = 0; j < num_rings; j++) { 2611 offset = i * I915_NUM_RINGS + j; 2612 seq_printf(m, "0x%08llx (0x%02llx) ", 2613 seqno[offset], offset * 8); 2614 } 2615 seq_putc(m, '\n'); 2616 2617 seq_puts(m, " Last wait: "); 2618 for (j = 0; j < num_rings; j++) { 2619 offset = i + (j * I915_NUM_RINGS); 2620 seq_printf(m, "0x%08llx (0x%02llx) ", 2621 seqno[offset], offset * 8); 2622 } 2623 seq_putc(m, '\n'); 2624 2625 } 2626 kunmap_atomic(seqno); 2627 } else { 2628 seq_puts(m, " Last signal:"); 2629 for_each_ring(ring, dev_priv, i) 2630 for (j = 0; j < num_rings; j++) 2631 seq_printf(m, "0x%08x\n", 2632 I915_READ(ring->semaphore.mbox.signal[j])); 2633 seq_putc(m, '\n'); 2634 } 2635 2636 seq_puts(m, "\nSync seqno:\n"); 2637 for_each_ring(ring, dev_priv, i) { 2638 for (j = 0; j < num_rings; j++) { 2639 seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]); 2640 } 2641 seq_putc(m, '\n'); 2642 } 2643 seq_putc(m, '\n'); 2644 2645 intel_runtime_pm_put(dev_priv); 2646 mutex_unlock(&dev->struct_mutex); 2647 return 0; 2648 } 2649 2650 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 2651 { 2652 struct drm_info_node *node = (struct drm_info_node *) m->private; 2653 struct drm_device *dev = node->minor->dev; 2654 struct drm_i915_private *dev_priv = dev->dev_private; 2655 int i; 2656 2657 drm_modeset_lock_all(dev); 2658 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 2659 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 2660 2661 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 2662 seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n", 2663 pll->config.crtc_mask, pll->active, yesno(pll->on)); 2664 seq_printf(m, " tracked hardware state:\n"); 2665 seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll); 2666 seq_printf(m, " dpll_md: 0x%08x\n", 2667 pll->config.hw_state.dpll_md); 2668 seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0); 2669 seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1); 2670 seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll); 2671 } 2672 drm_modeset_unlock_all(dev); 2673 2674 return 0; 2675 } 2676 2677 static int i915_wa_registers(struct seq_file *m, void *unused) 2678 { 2679 int i; 2680 int ret; 2681 struct drm_info_node *node = (struct drm_info_node *) m->private; 2682 struct drm_device *dev = node->minor->dev; 2683 struct drm_i915_private *dev_priv = dev->dev_private; 2684 2685 ret = mutex_lock_interruptible(&dev->struct_mutex); 2686 if (ret) 2687 return ret; 2688 2689 intel_runtime_pm_get(dev_priv); 2690 2691 seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); 2692 for (i = 0; i < dev_priv->workarounds.count; ++i) { 2693 u32 addr, mask, value, read; 2694 bool ok; 2695 2696 addr = dev_priv->workarounds.reg[i].addr; 2697 mask = dev_priv->workarounds.reg[i].mask; 2698 value = dev_priv->workarounds.reg[i].value; 2699 read = I915_READ(addr); 2700 ok = (value & mask) == (read & mask); 2701 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 2702 addr, value, mask, read, ok ? "OK" : "FAIL"); 2703 } 2704 2705 intel_runtime_pm_put(dev_priv); 2706 mutex_unlock(&dev->struct_mutex); 2707 2708 return 0; 2709 } 2710 2711 static int i915_ddb_info(struct seq_file *m, void *unused) 2712 { 2713 struct drm_info_node *node = m->private; 2714 struct drm_device *dev = node->minor->dev; 2715 struct drm_i915_private *dev_priv = dev->dev_private; 2716 struct skl_ddb_allocation *ddb; 2717 struct skl_ddb_entry *entry; 2718 enum pipe pipe; 2719 int plane; 2720 2721 drm_modeset_lock_all(dev); 2722 2723 ddb = &dev_priv->wm.skl_hw.ddb; 2724 2725 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 2726 2727 for_each_pipe(dev_priv, pipe) { 2728 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 2729 2730 for_each_plane(pipe, plane) { 2731 entry = &ddb->plane[pipe][plane]; 2732 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, 2733 entry->start, entry->end, 2734 skl_ddb_entry_size(entry)); 2735 } 2736 2737 entry = &ddb->cursor[pipe]; 2738 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 2739 entry->end, skl_ddb_entry_size(entry)); 2740 } 2741 2742 drm_modeset_unlock_all(dev); 2743 2744 return 0; 2745 } 2746 2747 struct pipe_crc_info { 2748 const char *name; 2749 struct drm_device *dev; 2750 enum pipe pipe; 2751 }; 2752 2753 static int i915_dp_mst_info(struct seq_file *m, void *unused) 2754 { 2755 struct drm_info_node *node = (struct drm_info_node *) m->private; 2756 struct drm_device *dev = node->minor->dev; 2757 struct drm_encoder *encoder; 2758 struct intel_encoder *intel_encoder; 2759 struct intel_digital_port *intel_dig_port; 2760 drm_modeset_lock_all(dev); 2761 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 2762 intel_encoder = to_intel_encoder(encoder); 2763 if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT) 2764 continue; 2765 intel_dig_port = enc_to_dig_port(encoder); 2766 if (!intel_dig_port->dp.can_mst) 2767 continue; 2768 2769 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 2770 } 2771 drm_modeset_unlock_all(dev); 2772 return 0; 2773 } 2774 2775 static int i915_pipe_crc_open(struct inode *inode, struct file *filep) 2776 { 2777 struct pipe_crc_info *info = inode->i_private; 2778 struct drm_i915_private *dev_priv = info->dev->dev_private; 2779 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2780 2781 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) 2782 return -ENODEV; 2783 2784 spin_lock_irq(&pipe_crc->lock); 2785 2786 if (pipe_crc->opened) { 2787 spin_unlock_irq(&pipe_crc->lock); 2788 return -EBUSY; /* already open */ 2789 } 2790 2791 pipe_crc->opened = true; 2792 filep->private_data = inode->i_private; 2793 2794 spin_unlock_irq(&pipe_crc->lock); 2795 2796 return 0; 2797 } 2798 2799 static int i915_pipe_crc_release(struct inode *inode, struct file *filep) 2800 { 2801 struct pipe_crc_info *info = inode->i_private; 2802 struct drm_i915_private *dev_priv = info->dev->dev_private; 2803 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2804 2805 spin_lock_irq(&pipe_crc->lock); 2806 pipe_crc->opened = false; 2807 spin_unlock_irq(&pipe_crc->lock); 2808 2809 return 0; 2810 } 2811 2812 /* (6 fields, 8 chars each, space separated (5) + '\n') */ 2813 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) 2814 /* account for \'0' */ 2815 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) 2816 2817 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) 2818 { 2819 assert_spin_locked(&pipe_crc->lock); 2820 return CIRC_CNT(pipe_crc->head, pipe_crc->tail, 2821 INTEL_PIPE_CRC_ENTRIES_NR); 2822 } 2823 2824 static ssize_t 2825 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, 2826 loff_t *pos) 2827 { 2828 struct pipe_crc_info *info = filep->private_data; 2829 struct drm_device *dev = info->dev; 2830 struct drm_i915_private *dev_priv = dev->dev_private; 2831 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2832 char buf[PIPE_CRC_BUFFER_LEN]; 2833 int head, tail, n_entries, n; 2834 ssize_t bytes_read; 2835 2836 /* 2837 * Don't allow user space to provide buffers not big enough to hold 2838 * a line of data. 2839 */ 2840 if (count < PIPE_CRC_LINE_LEN) 2841 return -EINVAL; 2842 2843 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) 2844 return 0; 2845 2846 /* nothing to read */ 2847 spin_lock_irq(&pipe_crc->lock); 2848 while (pipe_crc_data_count(pipe_crc) == 0) { 2849 int ret; 2850 2851 if (filep->f_flags & O_NONBLOCK) { 2852 spin_unlock_irq(&pipe_crc->lock); 2853 return -EAGAIN; 2854 } 2855 2856 ret = wait_event_interruptible_lock_irq(pipe_crc->wq, 2857 pipe_crc_data_count(pipe_crc), pipe_crc->lock); 2858 if (ret) { 2859 spin_unlock_irq(&pipe_crc->lock); 2860 return ret; 2861 } 2862 } 2863 2864 /* We now have one or more entries to read */ 2865 head = pipe_crc->head; 2866 tail = pipe_crc->tail; 2867 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR), 2868 count / PIPE_CRC_LINE_LEN); 2869 spin_unlock_irq(&pipe_crc->lock); 2870 2871 bytes_read = 0; 2872 n = 0; 2873 do { 2874 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail]; 2875 int ret; 2876 2877 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, 2878 "%8u %8x %8x %8x %8x %8x\n", 2879 entry->frame, entry->crc[0], 2880 entry->crc[1], entry->crc[2], 2881 entry->crc[3], entry->crc[4]); 2882 2883 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN, 2884 buf, PIPE_CRC_LINE_LEN); 2885 if (ret == PIPE_CRC_LINE_LEN) 2886 return -EFAULT; 2887 2888 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); 2889 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 2890 n++; 2891 } while (--n_entries); 2892 2893 spin_lock_irq(&pipe_crc->lock); 2894 pipe_crc->tail = tail; 2895 spin_unlock_irq(&pipe_crc->lock); 2896 2897 return bytes_read; 2898 } 2899 2900 static const struct file_operations i915_pipe_crc_fops = { 2901 .owner = THIS_MODULE, 2902 .open = i915_pipe_crc_open, 2903 .read = i915_pipe_crc_read, 2904 .release = i915_pipe_crc_release, 2905 }; 2906 2907 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { 2908 { 2909 .name = "i915_pipe_A_crc", 2910 .pipe = PIPE_A, 2911 }, 2912 { 2913 .name = "i915_pipe_B_crc", 2914 .pipe = PIPE_B, 2915 }, 2916 { 2917 .name = "i915_pipe_C_crc", 2918 .pipe = PIPE_C, 2919 }, 2920 }; 2921 2922 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, 2923 enum pipe pipe) 2924 { 2925 struct drm_device *dev = minor->dev; 2926 struct dentry *ent; 2927 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; 2928 2929 info->dev = dev; 2930 ent = debugfs_create_file(info->name, S_IRUGO, root, info, 2931 &i915_pipe_crc_fops); 2932 if (!ent) 2933 return -ENOMEM; 2934 2935 return drm_add_fake_info_node(minor, ent, info); 2936 } 2937 2938 static const char * const pipe_crc_sources[] = { 2939 "none", 2940 "plane1", 2941 "plane2", 2942 "pf", 2943 "pipe", 2944 "TV", 2945 "DP-B", 2946 "DP-C", 2947 "DP-D", 2948 "auto", 2949 }; 2950 2951 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) 2952 { 2953 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); 2954 return pipe_crc_sources[source]; 2955 } 2956 2957 static int display_crc_ctl_show(struct seq_file *m, void *data) 2958 { 2959 struct drm_device *dev = m->private; 2960 struct drm_i915_private *dev_priv = dev->dev_private; 2961 int i; 2962 2963 for (i = 0; i < I915_MAX_PIPES; i++) 2964 seq_printf(m, "%c %s\n", pipe_name(i), 2965 pipe_crc_source_name(dev_priv->pipe_crc[i].source)); 2966 2967 return 0; 2968 } 2969 2970 static int display_crc_ctl_open(struct inode *inode, struct file *file) 2971 { 2972 struct drm_device *dev = inode->i_private; 2973 2974 return single_open(file, display_crc_ctl_show, dev); 2975 } 2976 2977 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2978 uint32_t *val) 2979 { 2980 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2981 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2982 2983 switch (*source) { 2984 case INTEL_PIPE_CRC_SOURCE_PIPE: 2985 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; 2986 break; 2987 case INTEL_PIPE_CRC_SOURCE_NONE: 2988 *val = 0; 2989 break; 2990 default: 2991 return -EINVAL; 2992 } 2993 2994 return 0; 2995 } 2996 2997 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, 2998 enum intel_pipe_crc_source *source) 2999 { 3000 struct intel_encoder *encoder; 3001 struct intel_crtc *crtc; 3002 struct intel_digital_port *dig_port; 3003 int ret = 0; 3004 3005 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3006 3007 drm_modeset_lock_all(dev); 3008 for_each_intel_encoder(dev, encoder) { 3009 if (!encoder->base.crtc) 3010 continue; 3011 3012 crtc = to_intel_crtc(encoder->base.crtc); 3013 3014 if (crtc->pipe != pipe) 3015 continue; 3016 3017 switch (encoder->type) { 3018 case INTEL_OUTPUT_TVOUT: 3019 *source = INTEL_PIPE_CRC_SOURCE_TV; 3020 break; 3021 case INTEL_OUTPUT_DISPLAYPORT: 3022 case INTEL_OUTPUT_EDP: 3023 dig_port = enc_to_dig_port(&encoder->base); 3024 switch (dig_port->port) { 3025 case PORT_B: 3026 *source = INTEL_PIPE_CRC_SOURCE_DP_B; 3027 break; 3028 case PORT_C: 3029 *source = INTEL_PIPE_CRC_SOURCE_DP_C; 3030 break; 3031 case PORT_D: 3032 *source = INTEL_PIPE_CRC_SOURCE_DP_D; 3033 break; 3034 default: 3035 WARN(1, "nonexisting DP port %c\n", 3036 port_name(dig_port->port)); 3037 break; 3038 } 3039 break; 3040 default: 3041 break; 3042 } 3043 } 3044 drm_modeset_unlock_all(dev); 3045 3046 return ret; 3047 } 3048 3049 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, 3050 enum pipe pipe, 3051 enum intel_pipe_crc_source *source, 3052 uint32_t *val) 3053 { 3054 struct drm_i915_private *dev_priv = dev->dev_private; 3055 bool need_stable_symbols = false; 3056 3057 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3058 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 3059 if (ret) 3060 return ret; 3061 } 3062 3063 switch (*source) { 3064 case INTEL_PIPE_CRC_SOURCE_PIPE: 3065 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; 3066 break; 3067 case INTEL_PIPE_CRC_SOURCE_DP_B: 3068 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; 3069 need_stable_symbols = true; 3070 break; 3071 case INTEL_PIPE_CRC_SOURCE_DP_C: 3072 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; 3073 need_stable_symbols = true; 3074 break; 3075 case INTEL_PIPE_CRC_SOURCE_NONE: 3076 *val = 0; 3077 break; 3078 default: 3079 return -EINVAL; 3080 } 3081 3082 /* 3083 * When the pipe CRC tap point is after the transcoders we need 3084 * to tweak symbol-level features to produce a deterministic series of 3085 * symbols for a given frame. We need to reset those features only once 3086 * a frame (instead of every nth symbol): 3087 * - DC-balance: used to ensure a better clock recovery from the data 3088 * link (SDVO) 3089 * - DisplayPort scrambling: used for EMI reduction 3090 */ 3091 if (need_stable_symbols) { 3092 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3093 3094 tmp |= DC_BALANCE_RESET_VLV; 3095 if (pipe == PIPE_A) 3096 tmp |= PIPE_A_SCRAMBLE_RESET; 3097 else 3098 tmp |= PIPE_B_SCRAMBLE_RESET; 3099 3100 I915_WRITE(PORT_DFT2_G4X, tmp); 3101 } 3102 3103 return 0; 3104 } 3105 3106 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, 3107 enum pipe pipe, 3108 enum intel_pipe_crc_source *source, 3109 uint32_t *val) 3110 { 3111 struct drm_i915_private *dev_priv = dev->dev_private; 3112 bool need_stable_symbols = false; 3113 3114 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3115 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 3116 if (ret) 3117 return ret; 3118 } 3119 3120 switch (*source) { 3121 case INTEL_PIPE_CRC_SOURCE_PIPE: 3122 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; 3123 break; 3124 case INTEL_PIPE_CRC_SOURCE_TV: 3125 if (!SUPPORTS_TV(dev)) 3126 return -EINVAL; 3127 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; 3128 break; 3129 case INTEL_PIPE_CRC_SOURCE_DP_B: 3130 if (!IS_G4X(dev)) 3131 return -EINVAL; 3132 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; 3133 need_stable_symbols = true; 3134 break; 3135 case INTEL_PIPE_CRC_SOURCE_DP_C: 3136 if (!IS_G4X(dev)) 3137 return -EINVAL; 3138 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; 3139 need_stable_symbols = true; 3140 break; 3141 case INTEL_PIPE_CRC_SOURCE_DP_D: 3142 if (!IS_G4X(dev)) 3143 return -EINVAL; 3144 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; 3145 need_stable_symbols = true; 3146 break; 3147 case INTEL_PIPE_CRC_SOURCE_NONE: 3148 *val = 0; 3149 break; 3150 default: 3151 return -EINVAL; 3152 } 3153 3154 /* 3155 * When the pipe CRC tap point is after the transcoders we need 3156 * to tweak symbol-level features to produce a deterministic series of 3157 * symbols for a given frame. We need to reset those features only once 3158 * a frame (instead of every nth symbol): 3159 * - DC-balance: used to ensure a better clock recovery from the data 3160 * link (SDVO) 3161 * - DisplayPort scrambling: used for EMI reduction 3162 */ 3163 if (need_stable_symbols) { 3164 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3165 3166 WARN_ON(!IS_G4X(dev)); 3167 3168 I915_WRITE(PORT_DFT_I9XX, 3169 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); 3170 3171 if (pipe == PIPE_A) 3172 tmp |= PIPE_A_SCRAMBLE_RESET; 3173 else 3174 tmp |= PIPE_B_SCRAMBLE_RESET; 3175 3176 I915_WRITE(PORT_DFT2_G4X, tmp); 3177 } 3178 3179 return 0; 3180 } 3181 3182 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, 3183 enum pipe pipe) 3184 { 3185 struct drm_i915_private *dev_priv = dev->dev_private; 3186 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3187 3188 if (pipe == PIPE_A) 3189 tmp &= ~PIPE_A_SCRAMBLE_RESET; 3190 else 3191 tmp &= ~PIPE_B_SCRAMBLE_RESET; 3192 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) 3193 tmp &= ~DC_BALANCE_RESET_VLV; 3194 I915_WRITE(PORT_DFT2_G4X, tmp); 3195 3196 } 3197 3198 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, 3199 enum pipe pipe) 3200 { 3201 struct drm_i915_private *dev_priv = dev->dev_private; 3202 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3203 3204 if (pipe == PIPE_A) 3205 tmp &= ~PIPE_A_SCRAMBLE_RESET; 3206 else 3207 tmp &= ~PIPE_B_SCRAMBLE_RESET; 3208 I915_WRITE(PORT_DFT2_G4X, tmp); 3209 3210 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { 3211 I915_WRITE(PORT_DFT_I9XX, 3212 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); 3213 } 3214 } 3215 3216 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 3217 uint32_t *val) 3218 { 3219 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3220 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3221 3222 switch (*source) { 3223 case INTEL_PIPE_CRC_SOURCE_PLANE1: 3224 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; 3225 break; 3226 case INTEL_PIPE_CRC_SOURCE_PLANE2: 3227 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; 3228 break; 3229 case INTEL_PIPE_CRC_SOURCE_PIPE: 3230 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; 3231 break; 3232 case INTEL_PIPE_CRC_SOURCE_NONE: 3233 *val = 0; 3234 break; 3235 default: 3236 return -EINVAL; 3237 } 3238 3239 return 0; 3240 } 3241 3242 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev) 3243 { 3244 struct drm_i915_private *dev_priv = dev->dev_private; 3245 struct intel_crtc *crtc = 3246 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); 3247 3248 drm_modeset_lock_all(dev); 3249 /* 3250 * If we use the eDP transcoder we need to make sure that we don't 3251 * bypass the pfit, since otherwise the pipe CRC source won't work. Only 3252 * relevant on hsw with pipe A when using the always-on power well 3253 * routing. 3254 */ 3255 if (crtc->config.cpu_transcoder == TRANSCODER_EDP && 3256 !crtc->config.pch_pfit.enabled) { 3257 crtc->config.pch_pfit.force_thru = true; 3258 3259 intel_display_power_get(dev_priv, 3260 POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); 3261 3262 dev_priv->display.crtc_disable(&crtc->base); 3263 dev_priv->display.crtc_enable(&crtc->base); 3264 } 3265 drm_modeset_unlock_all(dev); 3266 } 3267 3268 static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev) 3269 { 3270 struct drm_i915_private *dev_priv = dev->dev_private; 3271 struct intel_crtc *crtc = 3272 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); 3273 3274 drm_modeset_lock_all(dev); 3275 /* 3276 * If we use the eDP transcoder we need to make sure that we don't 3277 * bypass the pfit, since otherwise the pipe CRC source won't work. Only 3278 * relevant on hsw with pipe A when using the always-on power well 3279 * routing. 3280 */ 3281 if (crtc->config.pch_pfit.force_thru) { 3282 crtc->config.pch_pfit.force_thru = false; 3283 3284 dev_priv->display.crtc_disable(&crtc->base); 3285 dev_priv->display.crtc_enable(&crtc->base); 3286 3287 intel_display_power_put(dev_priv, 3288 POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); 3289 } 3290 drm_modeset_unlock_all(dev); 3291 } 3292 3293 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev, 3294 enum pipe pipe, 3295 enum intel_pipe_crc_source *source, 3296 uint32_t *val) 3297 { 3298 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3299 *source = INTEL_PIPE_CRC_SOURCE_PF; 3300 3301 switch (*source) { 3302 case INTEL_PIPE_CRC_SOURCE_PLANE1: 3303 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; 3304 break; 3305 case INTEL_PIPE_CRC_SOURCE_PLANE2: 3306 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; 3307 break; 3308 case INTEL_PIPE_CRC_SOURCE_PF: 3309 if (IS_HASWELL(dev) && pipe == PIPE_A) 3310 hsw_trans_edp_pipe_A_crc_wa(dev); 3311 3312 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; 3313 break; 3314 case INTEL_PIPE_CRC_SOURCE_NONE: 3315 *val = 0; 3316 break; 3317 default: 3318 return -EINVAL; 3319 } 3320 3321 return 0; 3322 } 3323 3324 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, 3325 enum intel_pipe_crc_source source) 3326 { 3327 struct drm_i915_private *dev_priv = dev->dev_private; 3328 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3329 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, 3330 pipe)); 3331 u32 val = 0; /* shut up gcc */ 3332 int ret; 3333 3334 if (pipe_crc->source == source) 3335 return 0; 3336 3337 /* forbid changing the source without going back to 'none' */ 3338 if (pipe_crc->source && source) 3339 return -EINVAL; 3340 3341 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) { 3342 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); 3343 return -EIO; 3344 } 3345 3346 if (IS_GEN2(dev)) 3347 ret = i8xx_pipe_crc_ctl_reg(&source, &val); 3348 else if (INTEL_INFO(dev)->gen < 5) 3349 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); 3350 else if (IS_VALLEYVIEW(dev)) 3351 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); 3352 else if (IS_GEN5(dev) || IS_GEN6(dev)) 3353 ret = ilk_pipe_crc_ctl_reg(&source, &val); 3354 else 3355 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); 3356 3357 if (ret != 0) 3358 return ret; 3359 3360 /* none -> real source transition */ 3361 if (source) { 3362 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", 3363 pipe_name(pipe), pipe_crc_source_name(source)); 3364 3365 pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) * 3366 INTEL_PIPE_CRC_ENTRIES_NR, 3367 GFP_KERNEL); 3368 if (!pipe_crc->entries) 3369 return -ENOMEM; 3370 3371 /* 3372 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 3373 * enabled and disabled dynamically based on package C states, 3374 * user space can't make reliable use of the CRCs, so let's just 3375 * completely disable it. 3376 */ 3377 hsw_disable_ips(crtc); 3378 3379 spin_lock_irq(&pipe_crc->lock); 3380 pipe_crc->head = 0; 3381 pipe_crc->tail = 0; 3382 spin_unlock_irq(&pipe_crc->lock); 3383 } 3384 3385 pipe_crc->source = source; 3386 3387 I915_WRITE(PIPE_CRC_CTL(pipe), val); 3388 POSTING_READ(PIPE_CRC_CTL(pipe)); 3389 3390 /* real source -> none transition */ 3391 if (source == INTEL_PIPE_CRC_SOURCE_NONE) { 3392 struct intel_pipe_crc_entry *entries; 3393 struct intel_crtc *crtc = 3394 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 3395 3396 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", 3397 pipe_name(pipe)); 3398 3399 drm_modeset_lock(&crtc->base.mutex, NULL); 3400 if (crtc->active) 3401 intel_wait_for_vblank(dev, pipe); 3402 drm_modeset_unlock(&crtc->base.mutex); 3403 3404 spin_lock_irq(&pipe_crc->lock); 3405 entries = pipe_crc->entries; 3406 pipe_crc->entries = NULL; 3407 spin_unlock_irq(&pipe_crc->lock); 3408 3409 kfree(entries); 3410 3411 if (IS_G4X(dev)) 3412 g4x_undo_pipe_scramble_reset(dev, pipe); 3413 else if (IS_VALLEYVIEW(dev)) 3414 vlv_undo_pipe_scramble_reset(dev, pipe); 3415 else if (IS_HASWELL(dev) && pipe == PIPE_A) 3416 hsw_undo_trans_edp_pipe_A_crc_wa(dev); 3417 3418 hsw_enable_ips(crtc); 3419 } 3420 3421 return 0; 3422 } 3423 3424 /* 3425 * Parse pipe CRC command strings: 3426 * command: wsp* object wsp+ name wsp+ source wsp* 3427 * object: 'pipe' 3428 * name: (A | B | C) 3429 * source: (none | plane1 | plane2 | pf) 3430 * wsp: (#0x20 | #0x9 | #0xA)+ 3431 * 3432 * eg.: 3433 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A 3434 * "pipe A none" -> Stop CRC 3435 */ 3436 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) 3437 { 3438 int n_words = 0; 3439 3440 while (*buf) { 3441 char *end; 3442 3443 /* skip leading white space */ 3444 buf = skip_spaces(buf); 3445 if (!*buf) 3446 break; /* end of buffer */ 3447 3448 /* find end of word */ 3449 for (end = buf; *end && !isspace(*end); end++) 3450 ; 3451 3452 if (n_words == max_words) { 3453 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", 3454 max_words); 3455 return -EINVAL; /* ran out of words[] before bytes */ 3456 } 3457 3458 if (*end) 3459 *end++ = '\0'; 3460 words[n_words++] = buf; 3461 buf = end; 3462 } 3463 3464 return n_words; 3465 } 3466 3467 enum intel_pipe_crc_object { 3468 PIPE_CRC_OBJECT_PIPE, 3469 }; 3470 3471 static const char * const pipe_crc_objects[] = { 3472 "pipe", 3473 }; 3474 3475 static int 3476 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) 3477 { 3478 int i; 3479 3480 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) 3481 if (!strcmp(buf, pipe_crc_objects[i])) { 3482 *o = i; 3483 return 0; 3484 } 3485 3486 return -EINVAL; 3487 } 3488 3489 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) 3490 { 3491 const char name = buf[0]; 3492 3493 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) 3494 return -EINVAL; 3495 3496 *pipe = name - 'A'; 3497 3498 return 0; 3499 } 3500 3501 static int 3502 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) 3503 { 3504 int i; 3505 3506 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) 3507 if (!strcmp(buf, pipe_crc_sources[i])) { 3508 *s = i; 3509 return 0; 3510 } 3511 3512 return -EINVAL; 3513 } 3514 3515 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len) 3516 { 3517 #define N_WORDS 3 3518 int n_words; 3519 char *words[N_WORDS]; 3520 enum pipe pipe; 3521 enum intel_pipe_crc_object object; 3522 enum intel_pipe_crc_source source; 3523 3524 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); 3525 if (n_words != N_WORDS) { 3526 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", 3527 N_WORDS); 3528 return -EINVAL; 3529 } 3530 3531 if (display_crc_ctl_parse_object(words[0], &object) < 0) { 3532 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); 3533 return -EINVAL; 3534 } 3535 3536 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { 3537 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); 3538 return -EINVAL; 3539 } 3540 3541 if (display_crc_ctl_parse_source(words[2], &source) < 0) { 3542 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); 3543 return -EINVAL; 3544 } 3545 3546 return pipe_crc_set_source(dev, pipe, source); 3547 } 3548 3549 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, 3550 size_t len, loff_t *offp) 3551 { 3552 struct seq_file *m = file->private_data; 3553 struct drm_device *dev = m->private; 3554 char *tmpbuf; 3555 int ret; 3556 3557 if (len == 0) 3558 return 0; 3559 3560 if (len > PAGE_SIZE - 1) { 3561 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", 3562 PAGE_SIZE); 3563 return -E2BIG; 3564 } 3565 3566 tmpbuf = kmalloc(len + 1, GFP_KERNEL); 3567 if (!tmpbuf) 3568 return -ENOMEM; 3569 3570 if (copy_from_user(tmpbuf, ubuf, len)) { 3571 ret = -EFAULT; 3572 goto out; 3573 } 3574 tmpbuf[len] = '\0'; 3575 3576 ret = display_crc_ctl_parse(dev, tmpbuf, len); 3577 3578 out: 3579 kfree(tmpbuf); 3580 if (ret < 0) 3581 return ret; 3582 3583 *offp += len; 3584 return len; 3585 } 3586 3587 static const struct file_operations i915_display_crc_ctl_fops = { 3588 .owner = THIS_MODULE, 3589 .open = display_crc_ctl_open, 3590 .read = seq_read, 3591 .llseek = seq_lseek, 3592 .release = single_release, 3593 .write = display_crc_ctl_write 3594 }; 3595 3596 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) 3597 { 3598 struct drm_device *dev = m->private; 3599 int num_levels = ilk_wm_max_level(dev) + 1; 3600 int level; 3601 3602 drm_modeset_lock_all(dev); 3603 3604 for (level = 0; level < num_levels; level++) { 3605 unsigned int latency = wm[level]; 3606 3607 /* 3608 * - WM1+ latency values in 0.5us units 3609 * - latencies are in us on gen9 3610 */ 3611 if (INTEL_INFO(dev)->gen >= 9) 3612 latency *= 10; 3613 else if (level > 0) 3614 latency *= 5; 3615 3616 seq_printf(m, "WM%d %u (%u.%u usec)\n", 3617 level, wm[level], latency / 10, latency % 10); 3618 } 3619 3620 drm_modeset_unlock_all(dev); 3621 } 3622 3623 static int pri_wm_latency_show(struct seq_file *m, void *data) 3624 { 3625 struct drm_device *dev = m->private; 3626 struct drm_i915_private *dev_priv = dev->dev_private; 3627 const uint16_t *latencies; 3628 3629 if (INTEL_INFO(dev)->gen >= 9) 3630 latencies = dev_priv->wm.skl_latency; 3631 else 3632 latencies = to_i915(dev)->wm.pri_latency; 3633 3634 wm_latency_show(m, latencies); 3635 3636 return 0; 3637 } 3638 3639 static int spr_wm_latency_show(struct seq_file *m, void *data) 3640 { 3641 struct drm_device *dev = m->private; 3642 struct drm_i915_private *dev_priv = dev->dev_private; 3643 const uint16_t *latencies; 3644 3645 if (INTEL_INFO(dev)->gen >= 9) 3646 latencies = dev_priv->wm.skl_latency; 3647 else 3648 latencies = to_i915(dev)->wm.spr_latency; 3649 3650 wm_latency_show(m, latencies); 3651 3652 return 0; 3653 } 3654 3655 static int cur_wm_latency_show(struct seq_file *m, void *data) 3656 { 3657 struct drm_device *dev = m->private; 3658 struct drm_i915_private *dev_priv = dev->dev_private; 3659 const uint16_t *latencies; 3660 3661 if (INTEL_INFO(dev)->gen >= 9) 3662 latencies = dev_priv->wm.skl_latency; 3663 else 3664 latencies = to_i915(dev)->wm.cur_latency; 3665 3666 wm_latency_show(m, latencies); 3667 3668 return 0; 3669 } 3670 3671 static int pri_wm_latency_open(struct inode *inode, struct file *file) 3672 { 3673 struct drm_device *dev = inode->i_private; 3674 3675 if (HAS_GMCH_DISPLAY(dev)) 3676 return -ENODEV; 3677 3678 return single_open(file, pri_wm_latency_show, dev); 3679 } 3680 3681 static int spr_wm_latency_open(struct inode *inode, struct file *file) 3682 { 3683 struct drm_device *dev = inode->i_private; 3684 3685 if (HAS_GMCH_DISPLAY(dev)) 3686 return -ENODEV; 3687 3688 return single_open(file, spr_wm_latency_show, dev); 3689 } 3690 3691 static int cur_wm_latency_open(struct inode *inode, struct file *file) 3692 { 3693 struct drm_device *dev = inode->i_private; 3694 3695 if (HAS_GMCH_DISPLAY(dev)) 3696 return -ENODEV; 3697 3698 return single_open(file, cur_wm_latency_show, dev); 3699 } 3700 3701 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 3702 size_t len, loff_t *offp, uint16_t wm[8]) 3703 { 3704 struct seq_file *m = file->private_data; 3705 struct drm_device *dev = m->private; 3706 uint16_t new[8] = { 0 }; 3707 int num_levels = ilk_wm_max_level(dev) + 1; 3708 int level; 3709 int ret; 3710 char tmp[32]; 3711 3712 if (len >= sizeof(tmp)) 3713 return -EINVAL; 3714 3715 if (copy_from_user(tmp, ubuf, len)) 3716 return -EFAULT; 3717 3718 tmp[len] = '\0'; 3719 3720 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 3721 &new[0], &new[1], &new[2], &new[3], 3722 &new[4], &new[5], &new[6], &new[7]); 3723 if (ret != num_levels) 3724 return -EINVAL; 3725 3726 drm_modeset_lock_all(dev); 3727 3728 for (level = 0; level < num_levels; level++) 3729 wm[level] = new[level]; 3730 3731 drm_modeset_unlock_all(dev); 3732 3733 return len; 3734 } 3735 3736 3737 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 3738 size_t len, loff_t *offp) 3739 { 3740 struct seq_file *m = file->private_data; 3741 struct drm_device *dev = m->private; 3742 struct drm_i915_private *dev_priv = dev->dev_private; 3743 uint16_t *latencies; 3744 3745 if (INTEL_INFO(dev)->gen >= 9) 3746 latencies = dev_priv->wm.skl_latency; 3747 else 3748 latencies = to_i915(dev)->wm.pri_latency; 3749 3750 return wm_latency_write(file, ubuf, len, offp, latencies); 3751 } 3752 3753 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 3754 size_t len, loff_t *offp) 3755 { 3756 struct seq_file *m = file->private_data; 3757 struct drm_device *dev = m->private; 3758 struct drm_i915_private *dev_priv = dev->dev_private; 3759 uint16_t *latencies; 3760 3761 if (INTEL_INFO(dev)->gen >= 9) 3762 latencies = dev_priv->wm.skl_latency; 3763 else 3764 latencies = to_i915(dev)->wm.spr_latency; 3765 3766 return wm_latency_write(file, ubuf, len, offp, latencies); 3767 } 3768 3769 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 3770 size_t len, loff_t *offp) 3771 { 3772 struct seq_file *m = file->private_data; 3773 struct drm_device *dev = m->private; 3774 struct drm_i915_private *dev_priv = dev->dev_private; 3775 uint16_t *latencies; 3776 3777 if (INTEL_INFO(dev)->gen >= 9) 3778 latencies = dev_priv->wm.skl_latency; 3779 else 3780 latencies = to_i915(dev)->wm.cur_latency; 3781 3782 return wm_latency_write(file, ubuf, len, offp, latencies); 3783 } 3784 3785 static const struct file_operations i915_pri_wm_latency_fops = { 3786 .owner = THIS_MODULE, 3787 .open = pri_wm_latency_open, 3788 .read = seq_read, 3789 .llseek = seq_lseek, 3790 .release = single_release, 3791 .write = pri_wm_latency_write 3792 }; 3793 3794 static const struct file_operations i915_spr_wm_latency_fops = { 3795 .owner = THIS_MODULE, 3796 .open = spr_wm_latency_open, 3797 .read = seq_read, 3798 .llseek = seq_lseek, 3799 .release = single_release, 3800 .write = spr_wm_latency_write 3801 }; 3802 3803 static const struct file_operations i915_cur_wm_latency_fops = { 3804 .owner = THIS_MODULE, 3805 .open = cur_wm_latency_open, 3806 .read = seq_read, 3807 .llseek = seq_lseek, 3808 .release = single_release, 3809 .write = cur_wm_latency_write 3810 }; 3811 3812 static int 3813 i915_wedged_get(void *data, u64 *val) 3814 { 3815 struct drm_device *dev = data; 3816 struct drm_i915_private *dev_priv = dev->dev_private; 3817 3818 *val = atomic_read(&dev_priv->gpu_error.reset_counter); 3819 3820 return 0; 3821 } 3822 3823 static int 3824 i915_wedged_set(void *data, u64 val) 3825 { 3826 struct drm_device *dev = data; 3827 struct drm_i915_private *dev_priv = dev->dev_private; 3828 3829 intel_runtime_pm_get(dev_priv); 3830 3831 i915_handle_error(dev, val, 3832 "Manually setting wedged to %llu", val); 3833 3834 intel_runtime_pm_put(dev_priv); 3835 3836 return 0; 3837 } 3838 3839 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 3840 i915_wedged_get, i915_wedged_set, 3841 "%llu\n"); 3842 3843 static int 3844 i915_ring_stop_get(void *data, u64 *val) 3845 { 3846 struct drm_device *dev = data; 3847 struct drm_i915_private *dev_priv = dev->dev_private; 3848 3849 *val = dev_priv->gpu_error.stop_rings; 3850 3851 return 0; 3852 } 3853 3854 static int 3855 i915_ring_stop_set(void *data, u64 val) 3856 { 3857 struct drm_device *dev = data; 3858 struct drm_i915_private *dev_priv = dev->dev_private; 3859 int ret; 3860 3861 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 3862 3863 ret = mutex_lock_interruptible(&dev->struct_mutex); 3864 if (ret) 3865 return ret; 3866 3867 dev_priv->gpu_error.stop_rings = val; 3868 mutex_unlock(&dev->struct_mutex); 3869 3870 return 0; 3871 } 3872 3873 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 3874 i915_ring_stop_get, i915_ring_stop_set, 3875 "0x%08llx\n"); 3876 3877 static int 3878 i915_ring_missed_irq_get(void *data, u64 *val) 3879 { 3880 struct drm_device *dev = data; 3881 struct drm_i915_private *dev_priv = dev->dev_private; 3882 3883 *val = dev_priv->gpu_error.missed_irq_rings; 3884 return 0; 3885 } 3886 3887 static int 3888 i915_ring_missed_irq_set(void *data, u64 val) 3889 { 3890 struct drm_device *dev = data; 3891 struct drm_i915_private *dev_priv = dev->dev_private; 3892 int ret; 3893 3894 /* Lock against concurrent debugfs callers */ 3895 ret = mutex_lock_interruptible(&dev->struct_mutex); 3896 if (ret) 3897 return ret; 3898 dev_priv->gpu_error.missed_irq_rings = val; 3899 mutex_unlock(&dev->struct_mutex); 3900 3901 return 0; 3902 } 3903 3904 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 3905 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 3906 "0x%08llx\n"); 3907 3908 static int 3909 i915_ring_test_irq_get(void *data, u64 *val) 3910 { 3911 struct drm_device *dev = data; 3912 struct drm_i915_private *dev_priv = dev->dev_private; 3913 3914 *val = dev_priv->gpu_error.test_irq_rings; 3915 3916 return 0; 3917 } 3918 3919 static int 3920 i915_ring_test_irq_set(void *data, u64 val) 3921 { 3922 struct drm_device *dev = data; 3923 struct drm_i915_private *dev_priv = dev->dev_private; 3924 int ret; 3925 3926 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 3927 3928 /* Lock against concurrent debugfs callers */ 3929 ret = mutex_lock_interruptible(&dev->struct_mutex); 3930 if (ret) 3931 return ret; 3932 3933 dev_priv->gpu_error.test_irq_rings = val; 3934 mutex_unlock(&dev->struct_mutex); 3935 3936 return 0; 3937 } 3938 3939 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 3940 i915_ring_test_irq_get, i915_ring_test_irq_set, 3941 "0x%08llx\n"); 3942 3943 #define DROP_UNBOUND 0x1 3944 #define DROP_BOUND 0x2 3945 #define DROP_RETIRE 0x4 3946 #define DROP_ACTIVE 0x8 3947 #define DROP_ALL (DROP_UNBOUND | \ 3948 DROP_BOUND | \ 3949 DROP_RETIRE | \ 3950 DROP_ACTIVE) 3951 static int 3952 i915_drop_caches_get(void *data, u64 *val) 3953 { 3954 *val = DROP_ALL; 3955 3956 return 0; 3957 } 3958 3959 static int 3960 i915_drop_caches_set(void *data, u64 val) 3961 { 3962 struct drm_device *dev = data; 3963 struct drm_i915_private *dev_priv = dev->dev_private; 3964 int ret; 3965 3966 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 3967 3968 /* No need to check and wait for gpu resets, only libdrm auto-restarts 3969 * on ioctls on -EAGAIN. */ 3970 ret = mutex_lock_interruptible(&dev->struct_mutex); 3971 if (ret) 3972 return ret; 3973 3974 if (val & DROP_ACTIVE) { 3975 ret = i915_gpu_idle(dev); 3976 if (ret) 3977 goto unlock; 3978 } 3979 3980 if (val & (DROP_RETIRE | DROP_ACTIVE)) 3981 i915_gem_retire_requests(dev); 3982 3983 if (val & DROP_BOUND) 3984 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); 3985 3986 if (val & DROP_UNBOUND) 3987 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND); 3988 3989 unlock: 3990 mutex_unlock(&dev->struct_mutex); 3991 3992 return ret; 3993 } 3994 3995 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 3996 i915_drop_caches_get, i915_drop_caches_set, 3997 "0x%08llx\n"); 3998 3999 static int 4000 i915_max_freq_get(void *data, u64 *val) 4001 { 4002 struct drm_device *dev = data; 4003 struct drm_i915_private *dev_priv = dev->dev_private; 4004 int ret; 4005 4006 if (INTEL_INFO(dev)->gen < 6) 4007 return -ENODEV; 4008 4009 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4010 4011 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4012 if (ret) 4013 return ret; 4014 4015 if (IS_VALLEYVIEW(dev)) 4016 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); 4017 else 4018 *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER; 4019 mutex_unlock(&dev_priv->rps.hw_lock); 4020 4021 return 0; 4022 } 4023 4024 static int 4025 i915_max_freq_set(void *data, u64 val) 4026 { 4027 struct drm_device *dev = data; 4028 struct drm_i915_private *dev_priv = dev->dev_private; 4029 u32 rp_state_cap, hw_max, hw_min; 4030 int ret; 4031 4032 if (INTEL_INFO(dev)->gen < 6) 4033 return -ENODEV; 4034 4035 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4036 4037 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 4038 4039 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4040 if (ret) 4041 return ret; 4042 4043 /* 4044 * Turbo will still be enabled, but won't go above the set value. 4045 */ 4046 if (IS_VALLEYVIEW(dev)) { 4047 val = vlv_freq_opcode(dev_priv, val); 4048 4049 hw_max = dev_priv->rps.max_freq; 4050 hw_min = dev_priv->rps.min_freq; 4051 } else { 4052 do_div(val, GT_FREQUENCY_MULTIPLIER); 4053 4054 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 4055 hw_max = dev_priv->rps.max_freq; 4056 hw_min = (rp_state_cap >> 16) & 0xff; 4057 } 4058 4059 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { 4060 mutex_unlock(&dev_priv->rps.hw_lock); 4061 return -EINVAL; 4062 } 4063 4064 dev_priv->rps.max_freq_softlimit = val; 4065 4066 if (IS_VALLEYVIEW(dev)) 4067 valleyview_set_rps(dev, val); 4068 else 4069 gen6_set_rps(dev, val); 4070 4071 mutex_unlock(&dev_priv->rps.hw_lock); 4072 4073 return 0; 4074 } 4075 4076 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 4077 i915_max_freq_get, i915_max_freq_set, 4078 "%llu\n"); 4079 4080 static int 4081 i915_min_freq_get(void *data, u64 *val) 4082 { 4083 struct drm_device *dev = data; 4084 struct drm_i915_private *dev_priv = dev->dev_private; 4085 int ret; 4086 4087 if (INTEL_INFO(dev)->gen < 6) 4088 return -ENODEV; 4089 4090 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4091 4092 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4093 if (ret) 4094 return ret; 4095 4096 if (IS_VALLEYVIEW(dev)) 4097 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); 4098 else 4099 *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER; 4100 mutex_unlock(&dev_priv->rps.hw_lock); 4101 4102 return 0; 4103 } 4104 4105 static int 4106 i915_min_freq_set(void *data, u64 val) 4107 { 4108 struct drm_device *dev = data; 4109 struct drm_i915_private *dev_priv = dev->dev_private; 4110 u32 rp_state_cap, hw_max, hw_min; 4111 int ret; 4112 4113 if (INTEL_INFO(dev)->gen < 6) 4114 return -ENODEV; 4115 4116 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4117 4118 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 4119 4120 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4121 if (ret) 4122 return ret; 4123 4124 /* 4125 * Turbo will still be enabled, but won't go below the set value. 4126 */ 4127 if (IS_VALLEYVIEW(dev)) { 4128 val = vlv_freq_opcode(dev_priv, val); 4129 4130 hw_max = dev_priv->rps.max_freq; 4131 hw_min = dev_priv->rps.min_freq; 4132 } else { 4133 do_div(val, GT_FREQUENCY_MULTIPLIER); 4134 4135 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 4136 hw_max = dev_priv->rps.max_freq; 4137 hw_min = (rp_state_cap >> 16) & 0xff; 4138 } 4139 4140 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { 4141 mutex_unlock(&dev_priv->rps.hw_lock); 4142 return -EINVAL; 4143 } 4144 4145 dev_priv->rps.min_freq_softlimit = val; 4146 4147 if (IS_VALLEYVIEW(dev)) 4148 valleyview_set_rps(dev, val); 4149 else 4150 gen6_set_rps(dev, val); 4151 4152 mutex_unlock(&dev_priv->rps.hw_lock); 4153 4154 return 0; 4155 } 4156 4157 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 4158 i915_min_freq_get, i915_min_freq_set, 4159 "%llu\n"); 4160 4161 static int 4162 i915_cache_sharing_get(void *data, u64 *val) 4163 { 4164 struct drm_device *dev = data; 4165 struct drm_i915_private *dev_priv = dev->dev_private; 4166 u32 snpcr; 4167 int ret; 4168 4169 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 4170 return -ENODEV; 4171 4172 ret = mutex_lock_interruptible(&dev->struct_mutex); 4173 if (ret) 4174 return ret; 4175 intel_runtime_pm_get(dev_priv); 4176 4177 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 4178 4179 intel_runtime_pm_put(dev_priv); 4180 mutex_unlock(&dev_priv->dev->struct_mutex); 4181 4182 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 4183 4184 return 0; 4185 } 4186 4187 static int 4188 i915_cache_sharing_set(void *data, u64 val) 4189 { 4190 struct drm_device *dev = data; 4191 struct drm_i915_private *dev_priv = dev->dev_private; 4192 u32 snpcr; 4193 4194 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 4195 return -ENODEV; 4196 4197 if (val > 3) 4198 return -EINVAL; 4199 4200 intel_runtime_pm_get(dev_priv); 4201 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 4202 4203 /* Update the cache sharing policy here as well */ 4204 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 4205 snpcr &= ~GEN6_MBC_SNPCR_MASK; 4206 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 4207 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 4208 4209 intel_runtime_pm_put(dev_priv); 4210 return 0; 4211 } 4212 4213 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 4214 i915_cache_sharing_get, i915_cache_sharing_set, 4215 "%llu\n"); 4216 4217 static int i915_forcewake_open(struct inode *inode, struct file *file) 4218 { 4219 struct drm_device *dev = inode->i_private; 4220 struct drm_i915_private *dev_priv = dev->dev_private; 4221 4222 if (INTEL_INFO(dev)->gen < 6) 4223 return 0; 4224 4225 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 4226 4227 return 0; 4228 } 4229 4230 static int i915_forcewake_release(struct inode *inode, struct file *file) 4231 { 4232 struct drm_device *dev = inode->i_private; 4233 struct drm_i915_private *dev_priv = dev->dev_private; 4234 4235 if (INTEL_INFO(dev)->gen < 6) 4236 return 0; 4237 4238 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 4239 4240 return 0; 4241 } 4242 4243 static const struct file_operations i915_forcewake_fops = { 4244 .owner = THIS_MODULE, 4245 .open = i915_forcewake_open, 4246 .release = i915_forcewake_release, 4247 }; 4248 4249 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 4250 { 4251 struct drm_device *dev = minor->dev; 4252 struct dentry *ent; 4253 4254 ent = debugfs_create_file("i915_forcewake_user", 4255 S_IRUSR, 4256 root, dev, 4257 &i915_forcewake_fops); 4258 if (!ent) 4259 return -ENOMEM; 4260 4261 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 4262 } 4263 4264 static int i915_debugfs_create(struct dentry *root, 4265 struct drm_minor *minor, 4266 const char *name, 4267 const struct file_operations *fops) 4268 { 4269 struct drm_device *dev = minor->dev; 4270 struct dentry *ent; 4271 4272 ent = debugfs_create_file(name, 4273 S_IRUGO | S_IWUSR, 4274 root, dev, 4275 fops); 4276 if (!ent) 4277 return -ENOMEM; 4278 4279 return drm_add_fake_info_node(minor, ent, fops); 4280 } 4281 4282 static const struct drm_info_list i915_debugfs_list[] = { 4283 {"i915_capabilities", i915_capabilities, 0}, 4284 {"i915_gem_objects", i915_gem_object_info, 0}, 4285 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 4286 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 4287 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 4288 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 4289 {"i915_gem_stolen", i915_gem_stolen_list_info }, 4290 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 4291 {"i915_gem_request", i915_gem_request_info, 0}, 4292 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 4293 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 4294 {"i915_gem_interrupt", i915_interrupt_info, 0}, 4295 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 4296 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 4297 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 4298 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, 4299 {"i915_frequency_info", i915_frequency_info, 0}, 4300 {"i915_drpc_info", i915_drpc_info, 0}, 4301 {"i915_emon_status", i915_emon_status, 0}, 4302 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 4303 {"i915_fbc_status", i915_fbc_status, 0}, 4304 {"i915_ips_status", i915_ips_status, 0}, 4305 {"i915_sr_status", i915_sr_status, 0}, 4306 {"i915_opregion", i915_opregion, 0}, 4307 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 4308 {"i915_context_status", i915_context_status, 0}, 4309 {"i915_dump_lrc", i915_dump_lrc, 0}, 4310 {"i915_execlists", i915_execlists, 0}, 4311 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 4312 {"i915_swizzle_info", i915_swizzle_info, 0}, 4313 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 4314 {"i915_llc", i915_llc, 0}, 4315 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 4316 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 4317 {"i915_energy_uJ", i915_energy_uJ, 0}, 4318 {"i915_pc8_status", i915_pc8_status, 0}, 4319 {"i915_power_domain_info", i915_power_domain_info, 0}, 4320 {"i915_display_info", i915_display_info, 0}, 4321 {"i915_semaphore_status", i915_semaphore_status, 0}, 4322 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 4323 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 4324 {"i915_wa_registers", i915_wa_registers, 0}, 4325 {"i915_ddb_info", i915_ddb_info, 0}, 4326 }; 4327 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 4328 4329 static const struct i915_debugfs_files { 4330 const char *name; 4331 const struct file_operations *fops; 4332 } i915_debugfs_files[] = { 4333 {"i915_wedged", &i915_wedged_fops}, 4334 {"i915_max_freq", &i915_max_freq_fops}, 4335 {"i915_min_freq", &i915_min_freq_fops}, 4336 {"i915_cache_sharing", &i915_cache_sharing_fops}, 4337 {"i915_ring_stop", &i915_ring_stop_fops}, 4338 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 4339 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 4340 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 4341 {"i915_error_state", &i915_error_state_fops}, 4342 {"i915_next_seqno", &i915_next_seqno_fops}, 4343 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 4344 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 4345 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 4346 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 4347 {"i915_fbc_false_color", &i915_fbc_fc_fops}, 4348 }; 4349 4350 void intel_display_crc_init(struct drm_device *dev) 4351 { 4352 struct drm_i915_private *dev_priv = dev->dev_private; 4353 enum pipe pipe; 4354 4355 for_each_pipe(dev_priv, pipe) { 4356 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 4357 4358 pipe_crc->opened = false; 4359 spin_lock_init(&pipe_crc->lock); 4360 init_waitqueue_head(&pipe_crc->wq); 4361 } 4362 } 4363 4364 int i915_debugfs_init(struct drm_minor *minor) 4365 { 4366 int ret, i; 4367 4368 ret = i915_forcewake_create(minor->debugfs_root, minor); 4369 if (ret) 4370 return ret; 4371 4372 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 4373 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); 4374 if (ret) 4375 return ret; 4376 } 4377 4378 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 4379 ret = i915_debugfs_create(minor->debugfs_root, minor, 4380 i915_debugfs_files[i].name, 4381 i915_debugfs_files[i].fops); 4382 if (ret) 4383 return ret; 4384 } 4385 4386 return drm_debugfs_create_files(i915_debugfs_list, 4387 I915_DEBUGFS_ENTRIES, 4388 minor->debugfs_root, minor); 4389 } 4390 4391 void i915_debugfs_cleanup(struct drm_minor *minor) 4392 { 4393 int i; 4394 4395 drm_debugfs_remove_files(i915_debugfs_list, 4396 I915_DEBUGFS_ENTRIES, minor); 4397 4398 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 4399 1, minor); 4400 4401 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 4402 struct drm_info_list *info_list = 4403 (struct drm_info_list *)&i915_pipe_crc_data[i]; 4404 4405 drm_debugfs_remove_files(info_list, 1, minor); 4406 } 4407 4408 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 4409 struct drm_info_list *info_list = 4410 (struct drm_info_list *) i915_debugfs_files[i].fops; 4411 4412 drm_debugfs_remove_files(info_list, 1, minor); 4413 } 4414 } 4415