/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
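/*
 * Note: each i915_*_info() routine below backs a read-only file under
 * debugfs. Assuming debugfs is mounted in the usual place, the files can
 * be inspected from userspace with e.g.:
 *
 *	cat /sys/kernel/debug/dri/0/i915_capabilities
 *
 * (the DRM minor number, here 0, depends on the system).
 */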
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release.
 */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (i915_gem_obj_is_pinned(obj))
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}

static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			pin_count++;
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
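/*
 * Legend for the flag string emitted by describe_obj() above, reading the
 * helpers: "P" user-pinned / "p" kernel-pinned, "X"/"Y" tiling mode, "g"
 * global GTT binding, followed by size, read/write domains, the last
 * read/write/fence seqnos, the cache level, and optional " dirty" and
 * " purgeable" markers.
 */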
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)
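/*
 * count_objects (and count_vmas below) deliberately expand in the caller's
 * scope: they accumulate into local variables named size, count,
 * mappable_size and mappable_count that the caller must declare and zero,
 * e.g.:
 *
 *	size = count = mappable_size = mappable_count = 0;
 *	count_objects(&dev_priv->mm.bound_list, global_list);
 */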
struct file_stats {
	struct drm_i915_file_private *file_priv;
	int count;
	size_t total, unbound;
	size_t global, shared;
	size_t active, inactive;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, vma_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_is_ggtt(vma->vm)) {
				stats->global += obj->base.size;
				continue;
			}

			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->file_priv != stats->file_priv)
				continue;

			if (obj->ring) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->ring)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
			   task ? task->comm : "<unknown>",
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.global,
			   stats.shared,
			   stats.unbound);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 addr;

			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_ring) {
				seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
					   work->flip_queued_ring->name,
					   work->flip_queued_seqno,
					   dev_priv->next_seqno,
					   work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
					   i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
							     work->flip_queued_seqno));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   drm_vblank_count(dev, crtc->pipe));
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_INFO(dev)->gen >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
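/*
 * A note on seqnos: each engine writes the seqno of its last completed
 * request into the hardware status page as a breadcrumb. The boolean
 * passed to ring->get_seqno() is the "lazy coherency" flag: false forces
 * a coherent re-read of the status page, true accepts a possibly stale
 * cached value.
 */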
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			if (!intel_display_power_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(pipe))) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
seq_printf(m, "Display IIR_RW:\t%08x\n", 763 I915_READ(VLV_IIR_RW)); 764 seq_printf(m, "Display IMR:\t%08x\n", 765 I915_READ(VLV_IMR)); 766 for_each_pipe(dev_priv, pipe) 767 seq_printf(m, "Pipe %c stat:\t%08x\n", 768 pipe_name(pipe), 769 I915_READ(PIPESTAT(pipe))); 770 771 seq_printf(m, "Master IER:\t%08x\n", 772 I915_READ(VLV_MASTER_IER)); 773 774 seq_printf(m, "Render IER:\t%08x\n", 775 I915_READ(GTIER)); 776 seq_printf(m, "Render IIR:\t%08x\n", 777 I915_READ(GTIIR)); 778 seq_printf(m, "Render IMR:\t%08x\n", 779 I915_READ(GTIMR)); 780 781 seq_printf(m, "PM IER:\t\t%08x\n", 782 I915_READ(GEN6_PMIER)); 783 seq_printf(m, "PM IIR:\t\t%08x\n", 784 I915_READ(GEN6_PMIIR)); 785 seq_printf(m, "PM IMR:\t\t%08x\n", 786 I915_READ(GEN6_PMIMR)); 787 788 seq_printf(m, "Port hotplug:\t%08x\n", 789 I915_READ(PORT_HOTPLUG_EN)); 790 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 791 I915_READ(VLV_DPFLIPSTAT)); 792 seq_printf(m, "DPINVGTT:\t%08x\n", 793 I915_READ(DPINVGTT)); 794 795 } else if (!HAS_PCH_SPLIT(dev)) { 796 seq_printf(m, "Interrupt enable: %08x\n", 797 I915_READ(IER)); 798 seq_printf(m, "Interrupt identity: %08x\n", 799 I915_READ(IIR)); 800 seq_printf(m, "Interrupt mask: %08x\n", 801 I915_READ(IMR)); 802 for_each_pipe(dev_priv, pipe) 803 seq_printf(m, "Pipe %c stat: %08x\n", 804 pipe_name(pipe), 805 I915_READ(PIPESTAT(pipe))); 806 } else { 807 seq_printf(m, "North Display Interrupt enable: %08x\n", 808 I915_READ(DEIER)); 809 seq_printf(m, "North Display Interrupt identity: %08x\n", 810 I915_READ(DEIIR)); 811 seq_printf(m, "North Display Interrupt mask: %08x\n", 812 I915_READ(DEIMR)); 813 seq_printf(m, "South Display Interrupt enable: %08x\n", 814 I915_READ(SDEIER)); 815 seq_printf(m, "South Display Interrupt identity: %08x\n", 816 I915_READ(SDEIIR)); 817 seq_printf(m, "South Display Interrupt mask: %08x\n", 818 I915_READ(SDEIMR)); 819 seq_printf(m, "Graphics Interrupt enable: %08x\n", 820 I915_READ(GTIER)); 821 seq_printf(m, "Graphics Interrupt identity: %08x\n", 822 I915_READ(GTIIR)); 823 seq_printf(m, "Graphics Interrupt mask: %08x\n", 824 I915_READ(GTIMR)); 825 } 826 for_each_ring(ring, dev_priv, i) { 827 if (INTEL_INFO(dev)->gen >= 6) { 828 seq_printf(m, 829 "Graphics Interrupt mask (%s): %08x\n", 830 ring->name, I915_READ_IMR(ring)); 831 } 832 i915_ring_seqno_info(m, ring); 833 } 834 intel_runtime_pm_put(dev_priv); 835 mutex_unlock(&dev->struct_mutex); 836 837 return 0; 838 } 839 840 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 841 { 842 struct drm_info_node *node = m->private; 843 struct drm_device *dev = node->minor->dev; 844 struct drm_i915_private *dev_priv = dev->dev_private; 845 int i, ret; 846 847 ret = mutex_lock_interruptible(&dev->struct_mutex); 848 if (ret) 849 return ret; 850 851 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 852 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 853 for (i = 0; i < dev_priv->num_fence_regs; i++) { 854 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 855 856 seq_printf(m, "Fence %d, pin count = %d, object = ", 857 i, dev_priv->fence_regs[i].pin_count); 858 if (obj == NULL) 859 seq_puts(m, "unused"); 860 else 861 describe_obj(m, obj); 862 seq_putc(m, '\n'); 863 } 864 865 mutex_unlock(&dev->struct_mutex); 866 return 0; 867 } 868 869 static int i915_hws_info(struct seq_file *m, void *data) 870 { 871 struct drm_info_node *node = m->private; 872 struct drm_device *dev = node->minor->dev; 873 struct drm_i915_private *dev_priv = dev->dev_private; 874 struct intel_engine_cs *ring; 875 const 
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
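/*
 * Example (assuming the usual debugfs mount point and minor 0): the error
 * state captured on a GPU hang can be pulled and then cleared from
 * userspace with:
 *
 *	cat /sys/kernel/debug/dri/0/i915_error_state > error.txt
 *	echo > /sys/kernel/debug/dri/0/i915_error_state
 *
 * Any write resets the saved state via i915_error_state_write() above;
 * the next-seqno attribute likewise supports both read and write through
 * the simple-attribute fops just defined.
 */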
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
		   IS_BROADWELL(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			reqf >>= 24;
		else
			reqf >>= 25;
		reqf *= GT_FREQUENCY_MULTIPLIER;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
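/*
 * The RP ratio fields decoded above are in units of GT_FREQUENCY_MULTIPLIER
 * MHz (50 MHz per step on these non-Valleyview parts), hence the multiply
 * before reporting; Valleyview instead converts its Punit frequency
 * encoding via vlv_gpu_freq().
 */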
%dMHz\n", reqf); 1112 seq_printf(m, "CAGF: %dMHz\n", cagf); 1113 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 1114 GEN6_CURICONT_MASK); 1115 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 1116 GEN6_CURBSYTAVG_MASK); 1117 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 1118 GEN6_CURBSYTAVG_MASK); 1119 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 1120 GEN6_CURIAVG_MASK); 1121 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 1122 GEN6_CURBSYTAVG_MASK); 1123 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 1124 GEN6_CURBSYTAVG_MASK); 1125 1126 max_freq = (rp_state_cap & 0xff0000) >> 16; 1127 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1128 max_freq * GT_FREQUENCY_MULTIPLIER); 1129 1130 max_freq = (rp_state_cap & 0xff00) >> 8; 1131 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1132 max_freq * GT_FREQUENCY_MULTIPLIER); 1133 1134 max_freq = rp_state_cap & 0xff; 1135 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1136 max_freq * GT_FREQUENCY_MULTIPLIER); 1137 1138 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1139 dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER); 1140 } else if (IS_VALLEYVIEW(dev)) { 1141 u32 freq_sts; 1142 1143 mutex_lock(&dev_priv->rps.hw_lock); 1144 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 1145 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 1146 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 1147 1148 seq_printf(m, "max GPU freq: %d MHz\n", 1149 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1150 1151 seq_printf(m, "min GPU freq: %d MHz\n", 1152 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq)); 1153 1154 seq_printf(m, "efficient (RPe) frequency: %d MHz\n", 1155 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); 1156 1157 seq_printf(m, "current GPU freq: %d MHz\n", 1158 vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); 1159 mutex_unlock(&dev_priv->rps.hw_lock); 1160 } else { 1161 seq_puts(m, "no P-state info available\n"); 1162 } 1163 1164 out: 1165 intel_runtime_pm_put(dev_priv); 1166 return ret; 1167 } 1168 1169 static int ironlake_drpc_info(struct seq_file *m) 1170 { 1171 struct drm_info_node *node = m->private; 1172 struct drm_device *dev = node->minor->dev; 1173 struct drm_i915_private *dev_priv = dev->dev_private; 1174 u32 rgvmodectl, rstdbyctl; 1175 u16 crstandvid; 1176 int ret; 1177 1178 ret = mutex_lock_interruptible(&dev->struct_mutex); 1179 if (ret) 1180 return ret; 1181 intel_runtime_pm_get(dev_priv); 1182 1183 rgvmodectl = I915_READ(MEMMODECTL); 1184 rstdbyctl = I915_READ(RSTDBYCTL); 1185 crstandvid = I915_READ16(CRSTANDVID); 1186 1187 intel_runtime_pm_put(dev_priv); 1188 mutex_unlock(&dev->struct_mutex); 1189 1190 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1191 "yes" : "no"); 1192 seq_printf(m, "Boost freq: %d\n", 1193 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1194 MEMMODE_BOOST_FREQ_SHIFT); 1195 seq_printf(m, "HW control enabled: %s\n", 1196 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 1197 seq_printf(m, "SW control enabled: %s\n", 1198 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 1199 seq_printf(m, "Gated voltage change: %s\n", 1200 rgvmodectl & MEMMODE_RCLK_GATE ? 
"yes" : "no"); 1201 seq_printf(m, "Starting frequency: P%d\n", 1202 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1203 seq_printf(m, "Max P-state: P%d\n", 1204 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1205 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1206 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1207 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1208 seq_printf(m, "Render standby enabled: %s\n", 1209 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1210 seq_puts(m, "Current RS state: "); 1211 switch (rstdbyctl & RSX_STATUS_MASK) { 1212 case RSX_STATUS_ON: 1213 seq_puts(m, "on\n"); 1214 break; 1215 case RSX_STATUS_RC1: 1216 seq_puts(m, "RC1\n"); 1217 break; 1218 case RSX_STATUS_RC1E: 1219 seq_puts(m, "RC1E\n"); 1220 break; 1221 case RSX_STATUS_RS1: 1222 seq_puts(m, "RS1\n"); 1223 break; 1224 case RSX_STATUS_RS2: 1225 seq_puts(m, "RS2 (RC6)\n"); 1226 break; 1227 case RSX_STATUS_RS3: 1228 seq_puts(m, "RC3 (RC6+)\n"); 1229 break; 1230 default: 1231 seq_puts(m, "unknown\n"); 1232 break; 1233 } 1234 1235 return 0; 1236 } 1237 1238 static int vlv_drpc_info(struct seq_file *m) 1239 { 1240 1241 struct drm_info_node *node = m->private; 1242 struct drm_device *dev = node->minor->dev; 1243 struct drm_i915_private *dev_priv = dev->dev_private; 1244 u32 rpmodectl1, rcctl1; 1245 unsigned fw_rendercount = 0, fw_mediacount = 0; 1246 1247 intel_runtime_pm_get(dev_priv); 1248 1249 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1250 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1251 1252 intel_runtime_pm_put(dev_priv); 1253 1254 seq_printf(m, "Video Turbo Mode: %s\n", 1255 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1256 seq_printf(m, "Turbo enabled: %s\n", 1257 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1258 seq_printf(m, "HW control enabled: %s\n", 1259 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1260 seq_printf(m, "SW control enabled: %s\n", 1261 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1262 GEN6_RP_MEDIA_SW_MODE)); 1263 seq_printf(m, "RC6 Enabled: %s\n", 1264 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | 1265 GEN6_RC_CTL_EI_MODE(1)))); 1266 seq_printf(m, "Render Power Well: %s\n", 1267 (I915_READ(VLV_GTLC_PW_STATUS) & 1268 VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); 1269 seq_printf(m, "Media Power Well: %s\n", 1270 (I915_READ(VLV_GTLC_PW_STATUS) & 1271 VLV_GTLC_PW_MEDIA_STATUS_MASK) ? 
"Up" : "Down"); 1272 1273 seq_printf(m, "Render RC6 residency since boot: %u\n", 1274 I915_READ(VLV_GT_RENDER_RC6)); 1275 seq_printf(m, "Media RC6 residency since boot: %u\n", 1276 I915_READ(VLV_GT_MEDIA_RC6)); 1277 1278 spin_lock_irq(&dev_priv->uncore.lock); 1279 fw_rendercount = dev_priv->uncore.fw_rendercount; 1280 fw_mediacount = dev_priv->uncore.fw_mediacount; 1281 spin_unlock_irq(&dev_priv->uncore.lock); 1282 1283 seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount); 1284 seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount); 1285 1286 1287 return 0; 1288 } 1289 1290 1291 static int gen6_drpc_info(struct seq_file *m) 1292 { 1293 1294 struct drm_info_node *node = m->private; 1295 struct drm_device *dev = node->minor->dev; 1296 struct drm_i915_private *dev_priv = dev->dev_private; 1297 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1298 unsigned forcewake_count; 1299 int count = 0, ret; 1300 1301 ret = mutex_lock_interruptible(&dev->struct_mutex); 1302 if (ret) 1303 return ret; 1304 intel_runtime_pm_get(dev_priv); 1305 1306 spin_lock_irq(&dev_priv->uncore.lock); 1307 forcewake_count = dev_priv->uncore.forcewake_count; 1308 spin_unlock_irq(&dev_priv->uncore.lock); 1309 1310 if (forcewake_count) { 1311 seq_puts(m, "RC information inaccurate because somebody " 1312 "holds a forcewake reference \n"); 1313 } else { 1314 /* NB: we cannot use forcewake, else we read the wrong values */ 1315 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1316 udelay(10); 1317 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1318 } 1319 1320 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1321 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1322 1323 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1324 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1325 mutex_unlock(&dev->struct_mutex); 1326 mutex_lock(&dev_priv->rps.hw_lock); 1327 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1328 mutex_unlock(&dev_priv->rps.hw_lock); 1329 1330 intel_runtime_pm_put(dev_priv); 1331 1332 seq_printf(m, "Video Turbo Mode: %s\n", 1333 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1334 seq_printf(m, "HW control enabled: %s\n", 1335 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1336 seq_printf(m, "SW control enabled: %s\n", 1337 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1338 GEN6_RP_MEDIA_SW_MODE)); 1339 seq_printf(m, "RC1e Enabled: %s\n", 1340 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1341 seq_printf(m, "RC6 Enabled: %s\n", 1342 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1343 seq_printf(m, "Deep RC6 Enabled: %s\n", 1344 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1345 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1346 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1347 seq_puts(m, "Current RC state: "); 1348 switch (gt_core_status & GEN6_RCn_MASK) { 1349 case GEN6_RC0: 1350 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1351 seq_puts(m, "Core Power Down\n"); 1352 else 1353 seq_puts(m, "on\n"); 1354 break; 1355 case GEN6_RC3: 1356 seq_puts(m, "RC3\n"); 1357 break; 1358 case GEN6_RC6: 1359 seq_puts(m, "RC6\n"); 1360 break; 1361 case GEN6_RC7: 1362 seq_puts(m, "RC7\n"); 1363 break; 1364 default: 1365 seq_puts(m, "Unknown\n"); 1366 break; 1367 } 1368 1369 seq_printf(m, "Core Power Down: %s\n", 1370 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1371 1372 /* Not exactly sure what this is */ 1373 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1374 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1375 seq_printf(m, "RC6 residency since boot: %u\n", 
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6 voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev))
		return vlv_drpc_info(m);
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_fbc_fc_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	drm_modeset_lock_all(dev);
	*val = dev_priv->fbc.false_color;
	drm_modeset_unlock_all(dev);

	return 0;
}

static int i915_fbc_fc_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	drm_modeset_lock_all(dev);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	drm_modeset_unlock_all(dev);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
			i915_fbc_fc_get, i915_fbc_fc_set,
			"%llu\n");
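/*
 * FBC false color is a debug aid: when enabled, compressed scanout lines
 * are drawn in a solid color so compression efficiency is visible on
 * screen. Assuming the usual debugfs layout and file name, it can be
 * toggled with e.g.:
 *
 *	echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 */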
"enabled" : "disabled"); 1559 1560 return 0; 1561 } 1562 1563 static int i915_emon_status(struct seq_file *m, void *unused) 1564 { 1565 struct drm_info_node *node = m->private; 1566 struct drm_device *dev = node->minor->dev; 1567 struct drm_i915_private *dev_priv = dev->dev_private; 1568 unsigned long temp, chipset, gfx; 1569 int ret; 1570 1571 if (!IS_GEN5(dev)) 1572 return -ENODEV; 1573 1574 ret = mutex_lock_interruptible(&dev->struct_mutex); 1575 if (ret) 1576 return ret; 1577 1578 temp = i915_mch_val(dev_priv); 1579 chipset = i915_chipset_val(dev_priv); 1580 gfx = i915_gfx_val(dev_priv); 1581 mutex_unlock(&dev->struct_mutex); 1582 1583 seq_printf(m, "GMCH temp: %ld\n", temp); 1584 seq_printf(m, "Chipset power: %ld\n", chipset); 1585 seq_printf(m, "GFX power: %ld\n", gfx); 1586 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1587 1588 return 0; 1589 } 1590 1591 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1592 { 1593 struct drm_info_node *node = m->private; 1594 struct drm_device *dev = node->minor->dev; 1595 struct drm_i915_private *dev_priv = dev->dev_private; 1596 int ret = 0; 1597 int gpu_freq, ia_freq; 1598 1599 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1600 seq_puts(m, "unsupported on this chipset\n"); 1601 return 0; 1602 } 1603 1604 intel_runtime_pm_get(dev_priv); 1605 1606 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1607 1608 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1609 if (ret) 1610 goto out; 1611 1612 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1613 1614 for (gpu_freq = dev_priv->rps.min_freq_softlimit; 1615 gpu_freq <= dev_priv->rps.max_freq_softlimit; 1616 gpu_freq++) { 1617 ia_freq = gpu_freq; 1618 sandybridge_pcode_read(dev_priv, 1619 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1620 &ia_freq); 1621 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1622 gpu_freq * GT_FREQUENCY_MULTIPLIER, 1623 ((ia_freq >> 0) & 0xff) * 100, 1624 ((ia_freq >> 8) & 0xff) * 100); 1625 } 1626 1627 mutex_unlock(&dev_priv->rps.hw_lock); 1628 1629 out: 1630 intel_runtime_pm_put(dev_priv); 1631 return ret; 1632 } 1633 1634 static int i915_opregion(struct seq_file *m, void *unused) 1635 { 1636 struct drm_info_node *node = m->private; 1637 struct drm_device *dev = node->minor->dev; 1638 struct drm_i915_private *dev_priv = dev->dev_private; 1639 struct intel_opregion *opregion = &dev_priv->opregion; 1640 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); 1641 int ret; 1642 1643 if (data == NULL) 1644 return -ENOMEM; 1645 1646 ret = mutex_lock_interruptible(&dev->struct_mutex); 1647 if (ret) 1648 goto out; 1649 1650 if (opregion->header) { 1651 memcpy_fromio(data, opregion->header, OPREGION_SIZE); 1652 seq_write(m, data, OPREGION_SIZE); 1653 } 1654 1655 mutex_unlock(&dev->struct_mutex); 1656 1657 out: 1658 kfree(data); 1659 return 0; 1660 } 1661 1662 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1663 { 1664 struct drm_info_node *node = m->private; 1665 struct drm_device *dev = node->minor->dev; 1666 struct intel_fbdev *ifbdev = NULL; 1667 struct intel_framebuffer *fb; 1668 1669 #ifdef CONFIG_DRM_I915_FBDEV 1670 struct drm_i915_private *dev_priv = dev->dev_private; 1671 1672 ifbdev = dev_priv->fbdev; 1673 fb = to_intel_framebuffer(ifbdev->helper.fb); 1674 1675 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1676 fb->base.width, 1677 fb->base.height, 1678 fb->base.depth, 1679 fb->base.bits_per_pixel, 1680 atomic_read(&fb->base.refcount.refcount)); 1681 describe_obj(m, fb->obj); 1682 
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return ret;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_fbdev *ifbdev = NULL;
	struct intel_framebuffer *fb;

#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (ifbdev && &fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static void describe_ctx_ringbuf(struct seq_file *m,
				 struct intel_ringbuffer *ringbuf)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
		   ringbuf->space, ringbuf->head, ringbuf->tail,
		   ringbuf->last_retired_head);
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		if (!i915.enable_execlists &&
		    ctx->legacy_hw_ctx.rcs_state == NULL)
			continue;

		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i) {
			if (ring->default_context == ctx)
				seq_printf(m, "(default context %s) ",
					   ring->name);
		}

		if (i915.enable_execlists) {
			seq_putc(m, '\n');
			for_each_ring(ring, dev_priv, i) {
				struct drm_i915_gem_object *ctx_obj =
					ctx->engine[i].state;
				struct intel_ringbuffer *ringbuf =
					ctx->engine[i].ringbuf;

				seq_printf(m, "%s: ", ring->name);
				if (ctx_obj)
					describe_obj(m, ctx_obj);
				if (ringbuf)
					describe_ctx_ringbuf(m, ringbuf);
				seq_putc(m, '\n');
			}
		} else {
			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	int ret, i;

	if (!i915.enable_execlists) {
		seq_printf(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		for_each_ring(ring, dev_priv, i) {
			struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;

			if (ring->default_context == ctx)
				continue;

			if (ctx_obj) {
				struct page *page = i915_gem_object_get_page(ctx_obj, 1);
				uint32_t *reg_state = kmap_atomic(page);
				int j;

				seq_printf(m, "CONTEXT: %s %u\n", ring->name,
					   intel_execlists_ctx_id(ctx_obj));

				for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
					seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
						   i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
						   reg_state[j], reg_state[j + 1],
						   reg_state[j + 2], reg_state[j + 3]);
				}
				kunmap_atomic(reg_state);

				seq_putc(m, '\n');
			}
		}
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_execlists(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 status_pointer;
	u8 read_pointer;
	u8 write_pointer;
	u32 status;
	u32 ctx_id;
	struct list_head *cursor;
	int ring_id, i;
	int ret;

	if (!i915.enable_execlists) {
		seq_puts(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, ring_id) {
		struct intel_ctx_submit_request *head_req = NULL;
		int count = 0;
		unsigned long flags;

		seq_printf(m, "%s\n", ring->name);

		status = I915_READ(RING_EXECLIST_STATUS(ring));
		ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
			   status, ctx_id);

		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);

		read_pointer = ring->next_context_status_buffer;
		write_pointer = status_pointer & 0x07;
		if (read_pointer > write_pointer)
			write_pointer += 6;
		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
			   read_pointer, write_pointer);

		for (i = 0; i < 6; i++) {
			status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);

			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
				   i, status, ctx_id);
		}

		spin_lock_irqsave(&ring->execlist_lock, flags);
		list_for_each(cursor, &ring->execlist_queue)
			count++;
		head_req = list_first_entry_or_null(&ring->execlist_queue,
				struct intel_ctx_submit_request, execlist_link);
		spin_unlock_irqrestore(&ring->execlist_lock, flags);

		seq_printf(m, "\t%d requests in queue\n", count);
		if (head_req) {
			struct drm_i915_gem_object *ctx_obj;

			ctx_obj = head_req->ctx->engine[ring_id].state;
			seq_printf(m, "\tHead request id: %u\n",
				   intel_execlists_ctx_id(ctx_obj));
			seq_printf(m, "\tHead request tail: %u\n",
				   head_req->tail);
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
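/*
 * Reading i915_execlists() above: the context status buffer (CSB) is a
 * ring of six entries, so the hardware write pointer is advanced by six
 * when it has wrapped behind the driver's read pointer; each entry is a
 * (status, context id) dword pair spaced 8 bytes apart.
 */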
spin_unlock_irq(&dev_priv->uncore.lock); 1922 1923 if (IS_VALLEYVIEW(dev)) { 1924 seq_printf(m, "fw_rendercount = %u\n", fw_rendercount); 1925 seq_printf(m, "fw_mediacount = %u\n", fw_mediacount); 1926 } else 1927 seq_printf(m, "forcewake count = %u\n", forcewake_count); 1928 1929 return 0; 1930 } 1931 1932 static const char *swizzle_string(unsigned swizzle) 1933 { 1934 switch (swizzle) { 1935 case I915_BIT_6_SWIZZLE_NONE: 1936 return "none"; 1937 case I915_BIT_6_SWIZZLE_9: 1938 return "bit9"; 1939 case I915_BIT_6_SWIZZLE_9_10: 1940 return "bit9/bit10"; 1941 case I915_BIT_6_SWIZZLE_9_11: 1942 return "bit9/bit11"; 1943 case I915_BIT_6_SWIZZLE_9_10_11: 1944 return "bit9/bit10/bit11"; 1945 case I915_BIT_6_SWIZZLE_9_17: 1946 return "bit9/bit17"; 1947 case I915_BIT_6_SWIZZLE_9_10_17: 1948 return "bit9/bit10/bit17"; 1949 case I915_BIT_6_SWIZZLE_UNKNOWN: 1950 return "unknown"; 1951 } 1952 1953 return "bug"; 1954 } 1955 1956 static int i915_swizzle_info(struct seq_file *m, void *data) 1957 { 1958 struct drm_info_node *node = m->private; 1959 struct drm_device *dev = node->minor->dev; 1960 struct drm_i915_private *dev_priv = dev->dev_private; 1961 int ret; 1962 1963 ret = mutex_lock_interruptible(&dev->struct_mutex); 1964 if (ret) 1965 return ret; 1966 intel_runtime_pm_get(dev_priv); 1967 1968 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1969 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1970 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1971 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1972 1973 if (IS_GEN3(dev) || IS_GEN4(dev)) { 1974 seq_printf(m, "DCC = 0x%08x\n", 1975 I915_READ(DCC)); 1976 seq_printf(m, "C0DRB3 = 0x%04x\n", 1977 I915_READ16(C0DRB3)); 1978 seq_printf(m, "C1DRB3 = 0x%04x\n", 1979 I915_READ16(C1DRB3)); 1980 } else if (INTEL_INFO(dev)->gen >= 6) { 1981 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1982 I915_READ(MAD_DIMM_C0)); 1983 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1984 I915_READ(MAD_DIMM_C1)); 1985 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1986 I915_READ(MAD_DIMM_C2)); 1987 seq_printf(m, "TILECTL = 0x%08x\n", 1988 I915_READ(TILECTL)); 1989 if (IS_GEN8(dev)) 1990 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 1991 I915_READ(GAMTARBMODE)); 1992 else 1993 seq_printf(m, "ARB_MODE = 0x%08x\n", 1994 I915_READ(ARB_MODE)); 1995 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1996 I915_READ(DISP_ARB_CTL)); 1997 } 1998 intel_runtime_pm_put(dev_priv); 1999 mutex_unlock(&dev->struct_mutex); 2000 2001 return 0; 2002 } 2003 2004 static int per_file_ctx(int id, void *ptr, void *data) 2005 { 2006 struct intel_context *ctx = ptr; 2007 struct seq_file *m = data; 2008 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2009 2010 if (!ppgtt) { 2011 seq_printf(m, " no ppgtt for context %d\n", 2012 ctx->user_handle); 2013 return 0; 2014 } 2015 2016 if (i915_gem_context_is_default(ctx)) 2017 seq_puts(m, " default context:\n"); 2018 else 2019 seq_printf(m, " context %d:\n", ctx->user_handle); 2020 ppgtt->debug_dump(ppgtt, m); 2021 2022 return 0; 2023 } 2024 2025 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2026 { 2027 struct drm_i915_private *dev_priv = dev->dev_private; 2028 struct intel_engine_cs *ring; 2029 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2030 int unused, i; 2031 2032 if (!ppgtt) 2033 return; 2034 2035 seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages); 2036 seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries); 2037 for_each_ring(ring, dev_priv, unused) { 2038 seq_printf(m, "%s\n", ring->name); 2039 for (i = 0; i < 4; i++) { 2040 u32 offset =
0x270 + i * 8; 2041 u64 pdp = I915_READ(ring->mmio_base + offset + 4); 2042 pdp <<= 32; 2043 pdp |= I915_READ(ring->mmio_base + offset); 2044 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2045 } 2046 } 2047 } 2048 2049 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2050 { 2051 struct drm_i915_private *dev_priv = dev->dev_private; 2052 struct intel_engine_cs *ring; 2053 struct drm_file *file; 2054 int i; 2055 2056 if (INTEL_INFO(dev)->gen == 6) 2057 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2058 2059 for_each_ring(ring, dev_priv, i) { 2060 seq_printf(m, "%s\n", ring->name); 2061 if (INTEL_INFO(dev)->gen == 7) 2062 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 2063 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 2064 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 2065 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 2066 } 2067 if (dev_priv->mm.aliasing_ppgtt) { 2068 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2069 2070 seq_puts(m, "aliasing PPGTT:\n"); 2071 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 2072 2073 ppgtt->debug_dump(ppgtt, m); 2074 } 2075 2076 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2077 struct drm_i915_file_private *file_priv = file->driver_priv; 2078 2079 seq_printf(m, "proc: %s\n", 2080 get_pid_task(file->pid, PIDTYPE_PID)->comm); 2081 idr_for_each(&file_priv->context_idr, per_file_ctx, m); 2082 } 2083 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2084 } 2085 2086 static int i915_ppgtt_info(struct seq_file *m, void *data) 2087 { 2088 struct drm_info_node *node = m->private; 2089 struct drm_device *dev = node->minor->dev; 2090 struct drm_i915_private *dev_priv = dev->dev_private; 2091 2092 int ret = mutex_lock_interruptible(&dev->struct_mutex); 2093 if (ret) 2094 return ret; 2095 intel_runtime_pm_get(dev_priv); 2096 2097 if (INTEL_INFO(dev)->gen >= 8) 2098 gen8_ppgtt_info(m, dev); 2099 else if (INTEL_INFO(dev)->gen >= 6) 2100 gen6_ppgtt_info(m, dev); 2101 2102 intel_runtime_pm_put(dev_priv); 2103 mutex_unlock(&dev->struct_mutex); 2104 2105 return 0; 2106 } 2107 2108 static int i915_llc(struct seq_file *m, void *data) 2109 { 2110 struct drm_info_node *node = m->private; 2111 struct drm_device *dev = node->minor->dev; 2112 struct drm_i915_private *dev_priv = dev->dev_private; 2113 2114 /* Size calculation for LLC is a bit of a pain. Ignore for now. 
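 * We just report HAS_LLC() and the eLLC size that was cached in
 * dev_priv->ellc_size at driver load, rather than trying to compute
 * anything here.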
*/ 2115 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); 2116 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); 2117 2118 return 0; 2119 } 2120 2121 static int i915_edp_psr_status(struct seq_file *m, void *data) 2122 { 2123 struct drm_info_node *node = m->private; 2124 struct drm_device *dev = node->minor->dev; 2125 struct drm_i915_private *dev_priv = dev->dev_private; 2126 u32 psrperf = 0; 2127 bool enabled = false; 2128 2129 intel_runtime_pm_get(dev_priv); 2130 2131 mutex_lock(&dev_priv->psr.lock); 2132 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 2133 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 2134 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2135 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2136 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2137 dev_priv->psr.busy_frontbuffer_bits); 2138 seq_printf(m, "Re-enable work scheduled: %s\n", 2139 yesno(work_busy(&dev_priv->psr.work.work))); 2140 2141 enabled = HAS_PSR(dev) && 2142 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; 2143 seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled)); 2144 2145 if (HAS_PSR(dev)) 2146 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & 2147 EDP_PSR_PERF_CNT_MASK; 2148 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2149 mutex_unlock(&dev_priv->psr.lock); 2150 2151 intel_runtime_pm_put(dev_priv); 2152 return 0; 2153 } 2154 2155 static int i915_sink_crc(struct seq_file *m, void *data) 2156 { 2157 struct drm_info_node *node = m->private; 2158 struct drm_device *dev = node->minor->dev; 2159 struct intel_encoder *encoder; 2160 struct intel_connector *connector; 2161 struct intel_dp *intel_dp = NULL; 2162 int ret; 2163 u8 crc[6]; 2164 2165 drm_modeset_lock_all(dev); 2166 list_for_each_entry(connector, &dev->mode_config.connector_list, 2167 base.head) { 2168 2169 if (connector->base.dpms != DRM_MODE_DPMS_ON) 2170 continue; 2171 2172 if (!connector->base.encoder) 2173 continue; 2174 2175 encoder = to_intel_encoder(connector->base.encoder); 2176 if (encoder->type != INTEL_OUTPUT_EDP) 2177 continue; 2178 2179 intel_dp = enc_to_intel_dp(&encoder->base); 2180 2181 ret = intel_dp_sink_crc(intel_dp, crc); 2182 if (ret) 2183 goto out; 2184 2185 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2186 crc[0], crc[1], crc[2], 2187 crc[3], crc[4], crc[5]); 2188 goto out; 2189 } 2190 ret = -ENODEV; 2191 out: 2192 drm_modeset_unlock_all(dev); 2193 return ret; 2194 } 2195 2196 static int i915_energy_uJ(struct seq_file *m, void *data) 2197 { 2198 struct drm_info_node *node = m->private; 2199 struct drm_device *dev = node->minor->dev; 2200 struct drm_i915_private *dev_priv = dev->dev_private; 2201 u64 power; 2202 u32 units; 2203 2204 if (INTEL_INFO(dev)->gen < 6) 2205 return -ENODEV; 2206 2207 intel_runtime_pm_get(dev_priv); 2208 2209 rdmsrl(MSR_RAPL_POWER_UNIT, power); 2210 power = (power & 0x1f00) >> 8; 2211 units = 1000000 / (1 << power); /* convert to uJ */ 2212 power = I915_READ(MCH_SECP_NRG_STTS); 2213 power *= units; 2214 2215 intel_runtime_pm_put(dev_priv); 2216 2217 seq_printf(m, "%llu", (long long unsigned)power); 2218 2219 return 0; 2220 } 2221 2222 static int i915_pc8_status(struct seq_file *m, void *unused) 2223 { 2224 struct drm_info_node *node = m->private; 2225 struct drm_device *dev = node->minor->dev; 2226 struct drm_i915_private *dev_priv = dev->dev_private; 2227 2228 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { 2229 seq_puts(m, "not supported\n"); 2230 return 0; 2231 } 2232 2233 seq_printf(m, "GPU idle: %s\n", 
yesno(!dev_priv->mm.busy)); 2234 seq_printf(m, "IRQs disabled: %s\n", 2235 yesno(!intel_irqs_enabled(dev_priv))); 2236 2237 return 0; 2238 } 2239 2240 static const char *power_domain_str(enum intel_display_power_domain domain) 2241 { 2242 switch (domain) { 2243 case POWER_DOMAIN_PIPE_A: 2244 return "PIPE_A"; 2245 case POWER_DOMAIN_PIPE_B: 2246 return "PIPE_B"; 2247 case POWER_DOMAIN_PIPE_C: 2248 return "PIPE_C"; 2249 case POWER_DOMAIN_PIPE_A_PANEL_FITTER: 2250 return "PIPE_A_PANEL_FITTER"; 2251 case POWER_DOMAIN_PIPE_B_PANEL_FITTER: 2252 return "PIPE_B_PANEL_FITTER"; 2253 case POWER_DOMAIN_PIPE_C_PANEL_FITTER: 2254 return "PIPE_C_PANEL_FITTER"; 2255 case POWER_DOMAIN_TRANSCODER_A: 2256 return "TRANSCODER_A"; 2257 case POWER_DOMAIN_TRANSCODER_B: 2258 return "TRANSCODER_B"; 2259 case POWER_DOMAIN_TRANSCODER_C: 2260 return "TRANSCODER_C"; 2261 case POWER_DOMAIN_TRANSCODER_EDP: 2262 return "TRANSCODER_EDP"; 2263 case POWER_DOMAIN_PORT_DDI_A_2_LANES: 2264 return "PORT_DDI_A_2_LANES"; 2265 case POWER_DOMAIN_PORT_DDI_A_4_LANES: 2266 return "PORT_DDI_A_4_LANES"; 2267 case POWER_DOMAIN_PORT_DDI_B_2_LANES: 2268 return "PORT_DDI_B_2_LANES"; 2269 case POWER_DOMAIN_PORT_DDI_B_4_LANES: 2270 return "PORT_DDI_B_4_LANES"; 2271 case POWER_DOMAIN_PORT_DDI_C_2_LANES: 2272 return "PORT_DDI_C_2_LANES"; 2273 case POWER_DOMAIN_PORT_DDI_C_4_LANES: 2274 return "PORT_DDI_C_4_LANES"; 2275 case POWER_DOMAIN_PORT_DDI_D_2_LANES: 2276 return "PORT_DDI_D_2_LANES"; 2277 case POWER_DOMAIN_PORT_DDI_D_4_LANES: 2278 return "PORT_DDI_D_4_LANES"; 2279 case POWER_DOMAIN_PORT_DSI: 2280 return "PORT_DSI"; 2281 case POWER_DOMAIN_PORT_CRT: 2282 return "PORT_CRT"; 2283 case POWER_DOMAIN_PORT_OTHER: 2284 return "PORT_OTHER"; 2285 case POWER_DOMAIN_VGA: 2286 return "VGA"; 2287 case POWER_DOMAIN_AUDIO: 2288 return "AUDIO"; 2289 case POWER_DOMAIN_PLLS: 2290 return "PLLS"; 2291 case POWER_DOMAIN_INIT: 2292 return "INIT"; 2293 default: 2294 WARN_ON(1); 2295 return "?"; 2296 } 2297 } 2298 2299 static int i915_power_domain_info(struct seq_file *m, void *unused) 2300 { 2301 struct drm_info_node *node = m->private; 2302 struct drm_device *dev = node->minor->dev; 2303 struct drm_i915_private *dev_priv = dev->dev_private; 2304 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2305 int i; 2306 2307 mutex_lock(&power_domains->lock); 2308 2309 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2310 for (i = 0; i < power_domains->power_well_count; i++) { 2311 struct i915_power_well *power_well; 2312 enum intel_display_power_domain power_domain; 2313 2314 power_well = &power_domains->power_wells[i]; 2315 seq_printf(m, "%-25s %d\n", power_well->name, 2316 power_well->count); 2317 2318 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM; 2319 power_domain++) { 2320 if (!(BIT(power_domain) & power_well->domains)) 2321 continue; 2322 2323 seq_printf(m, " %-23s %d\n", 2324 power_domain_str(power_domain), 2325 power_domains->domain_use_count[power_domain]); 2326 } 2327 } 2328 2329 mutex_unlock(&power_domains->lock); 2330 2331 return 0; 2332 } 2333 2334 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2335 struct drm_display_mode *mode) 2336 { 2337 int i; 2338 2339 for (i = 0; i < tabs; i++) 2340 seq_putc(m, '\t'); 2341 2342 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2343 mode->base.id, mode->name, 2344 mode->vrefresh, mode->clock, 2345 mode->hdisplay, mode->hsync_start, 2346 mode->hsync_end, mode->htotal, 2347 
mode->vdisplay, mode->vsync_start, 2348 mode->vsync_end, mode->vtotal, 2349 mode->type, mode->flags); 2350 } 2351 2352 static void intel_encoder_info(struct seq_file *m, 2353 struct intel_crtc *intel_crtc, 2354 struct intel_encoder *intel_encoder) 2355 { 2356 struct drm_info_node *node = m->private; 2357 struct drm_device *dev = node->minor->dev; 2358 struct drm_crtc *crtc = &intel_crtc->base; 2359 struct intel_connector *intel_connector; 2360 struct drm_encoder *encoder; 2361 2362 encoder = &intel_encoder->base; 2363 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2364 encoder->base.id, encoder->name); 2365 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2366 struct drm_connector *connector = &intel_connector->base; 2367 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2368 connector->base.id, 2369 connector->name, 2370 drm_get_connector_status_name(connector->status)); 2371 if (connector->status == connector_status_connected) { 2372 struct drm_display_mode *mode = &crtc->mode; 2373 seq_printf(m, ", mode:\n"); 2374 intel_seq_print_mode(m, 2, mode); 2375 } else { 2376 seq_putc(m, '\n'); 2377 } 2378 } 2379 } 2380 2381 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2382 { 2383 struct drm_info_node *node = m->private; 2384 struct drm_device *dev = node->minor->dev; 2385 struct drm_crtc *crtc = &intel_crtc->base; 2386 struct intel_encoder *intel_encoder; 2387 2388 if (crtc->primary->fb) 2389 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2390 crtc->primary->fb->base.id, crtc->x, crtc->y, 2391 crtc->primary->fb->width, crtc->primary->fb->height); 2392 else 2393 seq_puts(m, "\tprimary plane disabled\n"); 2394 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2395 intel_encoder_info(m, intel_crtc, intel_encoder); 2396 } 2397 2398 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2399 { 2400 struct drm_display_mode *mode = panel->fixed_mode; 2401 2402 seq_printf(m, "\tfixed mode:\n"); 2403 intel_seq_print_mode(m, 2, mode); 2404 } 2405 2406 static void intel_dp_info(struct seq_file *m, 2407 struct intel_connector *intel_connector) 2408 { 2409 struct intel_encoder *intel_encoder = intel_connector->encoder; 2410 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2411 2412 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2413 seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" : 2414 "no"); 2415 if (intel_encoder->type == INTEL_OUTPUT_EDP) 2416 intel_panel_info(m, &intel_connector->panel); 2417 } 2418 2419 static void intel_hdmi_info(struct seq_file *m, 2420 struct intel_connector *intel_connector) 2421 { 2422 struct intel_encoder *intel_encoder = intel_connector->encoder; 2423 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2424 2425 seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? 
"yes" : 2426 "no"); 2427 } 2428 2429 static void intel_lvds_info(struct seq_file *m, 2430 struct intel_connector *intel_connector) 2431 { 2432 intel_panel_info(m, &intel_connector->panel); 2433 } 2434 2435 static void intel_connector_info(struct seq_file *m, 2436 struct drm_connector *connector) 2437 { 2438 struct intel_connector *intel_connector = to_intel_connector(connector); 2439 struct intel_encoder *intel_encoder = intel_connector->encoder; 2440 struct drm_display_mode *mode; 2441 2442 seq_printf(m, "connector %d: type %s, status: %s\n", 2443 connector->base.id, connector->name, 2444 drm_get_connector_status_name(connector->status)); 2445 if (connector->status == connector_status_connected) { 2446 seq_printf(m, "\tname: %s\n", connector->display_info.name); 2447 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2448 connector->display_info.width_mm, 2449 connector->display_info.height_mm); 2450 seq_printf(m, "\tsubpixel order: %s\n", 2451 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2452 seq_printf(m, "\tCEA rev: %d\n", 2453 connector->display_info.cea_rev); 2454 } 2455 if (intel_encoder) { 2456 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2457 intel_encoder->type == INTEL_OUTPUT_EDP) 2458 intel_dp_info(m, intel_connector); 2459 else if (intel_encoder->type == INTEL_OUTPUT_HDMI) 2460 intel_hdmi_info(m, intel_connector); 2461 else if (intel_encoder->type == INTEL_OUTPUT_LVDS) 2462 intel_lvds_info(m, intel_connector); 2463 } 2464 2465 seq_printf(m, "\tmodes:\n"); 2466 list_for_each_entry(mode, &connector->modes, head) 2467 intel_seq_print_mode(m, 2, mode); 2468 } 2469 2470 static bool cursor_active(struct drm_device *dev, int pipe) 2471 { 2472 struct drm_i915_private *dev_priv = dev->dev_private; 2473 u32 state; 2474 2475 if (IS_845G(dev) || IS_I865G(dev)) 2476 state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 2477 else 2478 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 2479 2480 return state; 2481 } 2482 2483 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) 2484 { 2485 struct drm_i915_private *dev_priv = dev->dev_private; 2486 u32 pos; 2487 2488 pos = I915_READ(CURPOS(pipe)); 2489 2490 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; 2491 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) 2492 *x = -*x; 2493 2494 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; 2495 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) 2496 *y = -*y; 2497 2498 return cursor_active(dev, pipe); 2499 } 2500 2501 static int i915_display_info(struct seq_file *m, void *unused) 2502 { 2503 struct drm_info_node *node = m->private; 2504 struct drm_device *dev = node->minor->dev; 2505 struct drm_i915_private *dev_priv = dev->dev_private; 2506 struct intel_crtc *crtc; 2507 struct drm_connector *connector; 2508 2509 intel_runtime_pm_get(dev_priv); 2510 drm_modeset_lock_all(dev); 2511 seq_printf(m, "CRTC info\n"); 2512 seq_printf(m, "---------\n"); 2513 for_each_intel_crtc(dev, crtc) { 2514 bool active; 2515 int x, y; 2516 2517 seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", 2518 crtc->base.base.id, pipe_name(crtc->pipe), 2519 yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h); 2520 if (crtc->active) { 2521 intel_crtc_info(m, crtc); 2522 2523 active = cursor_position(dev, crtc->pipe, &x, &y); 2524 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? 
%s\n", 2525 yesno(crtc->cursor_base), 2526 x, y, crtc->cursor_width, crtc->cursor_height, 2527 crtc->cursor_addr, yesno(active)); 2528 } 2529 2530 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 2531 yesno(!crtc->cpu_fifo_underrun_disabled), 2532 yesno(!crtc->pch_fifo_underrun_disabled)); 2533 } 2534 2535 seq_printf(m, "\n"); 2536 seq_printf(m, "Connector info\n"); 2537 seq_printf(m, "--------------\n"); 2538 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2539 intel_connector_info(m, connector); 2540 } 2541 drm_modeset_unlock_all(dev); 2542 intel_runtime_pm_put(dev_priv); 2543 2544 return 0; 2545 } 2546 2547 static int i915_semaphore_status(struct seq_file *m, void *unused) 2548 { 2549 struct drm_info_node *node = (struct drm_info_node *) m->private; 2550 struct drm_device *dev = node->minor->dev; 2551 struct drm_i915_private *dev_priv = dev->dev_private; 2552 struct intel_engine_cs *ring; 2553 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 2554 int i, j, ret; 2555 2556 if (!i915_semaphore_is_enabled(dev)) { 2557 seq_puts(m, "Semaphores are disabled\n"); 2558 return 0; 2559 } 2560 2561 ret = mutex_lock_interruptible(&dev->struct_mutex); 2562 if (ret) 2563 return ret; 2564 intel_runtime_pm_get(dev_priv); 2565 2566 if (IS_BROADWELL(dev)) { 2567 struct page *page; 2568 uint64_t *seqno; 2569 2570 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); 2571 2572 seqno = (uint64_t *)kmap_atomic(page); 2573 for_each_ring(ring, dev_priv, i) { 2574 uint64_t offset; 2575 2576 seq_printf(m, "%s\n", ring->name); 2577 2578 seq_puts(m, " Last signal:"); 2579 for (j = 0; j < num_rings; j++) { 2580 offset = i * I915_NUM_RINGS + j; 2581 seq_printf(m, "0x%08llx (0x%02llx) ", 2582 seqno[offset], offset * 8); 2583 } 2584 seq_putc(m, '\n'); 2585 2586 seq_puts(m, " Last wait: "); 2587 for (j = 0; j < num_rings; j++) { 2588 offset = i + (j * I915_NUM_RINGS); 2589 seq_printf(m, "0x%08llx (0x%02llx) ", 2590 seqno[offset], offset * 8); 2591 } 2592 seq_putc(m, '\n'); 2593 2594 } 2595 kunmap_atomic(seqno); 2596 } else { 2597 seq_puts(m, " Last signal:"); 2598 for_each_ring(ring, dev_priv, i) 2599 for (j = 0; j < num_rings; j++) 2600 seq_printf(m, "0x%08x\n", 2601 I915_READ(ring->semaphore.mbox.signal[j])); 2602 seq_putc(m, '\n'); 2603 } 2604 2605 seq_puts(m, "\nSync seqno:\n"); 2606 for_each_ring(ring, dev_priv, i) { 2607 for (j = 0; j < num_rings; j++) { 2608 seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]); 2609 } 2610 seq_putc(m, '\n'); 2611 } 2612 seq_putc(m, '\n'); 2613 2614 intel_runtime_pm_put(dev_priv); 2615 mutex_unlock(&dev->struct_mutex); 2616 return 0; 2617 } 2618 2619 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 2620 { 2621 struct drm_info_node *node = (struct drm_info_node *) m->private; 2622 struct drm_device *dev = node->minor->dev; 2623 struct drm_i915_private *dev_priv = dev->dev_private; 2624 int i; 2625 2626 drm_modeset_lock_all(dev); 2627 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 2628 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 2629 2630 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 2631 seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount, 2632 pll->active, yesno(pll->on)); 2633 seq_printf(m, " tracked hardware state:\n"); 2634 seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll); 2635 seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md); 2636 seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0); 2637 seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1); 2638 
seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll); 2639 } 2640 drm_modeset_unlock_all(dev); 2641 2642 return 0; 2643 } 2644 2645 static int i915_wa_registers(struct seq_file *m, void *unused) 2646 { 2647 int i; 2648 int ret; 2649 struct drm_info_node *node = (struct drm_info_node *) m->private; 2650 struct drm_device *dev = node->minor->dev; 2651 struct drm_i915_private *dev_priv = dev->dev_private; 2652 2653 ret = mutex_lock_interruptible(&dev->struct_mutex); 2654 if (ret) 2655 return ret; 2656 2657 intel_runtime_pm_get(dev_priv); 2658 2659 seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs); 2660 for (i = 0; i < dev_priv->num_wa_regs; ++i) { 2661 u32 addr, mask; 2662 2663 addr = dev_priv->intel_wa_regs[i].addr; 2664 mask = dev_priv->intel_wa_regs[i].mask; 2665 dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask; 2666 if (dev_priv->intel_wa_regs[i].addr) 2667 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", 2668 dev_priv->intel_wa_regs[i].addr, 2669 dev_priv->intel_wa_regs[i].value, 2670 dev_priv->intel_wa_regs[i].mask); 2671 } 2672 2673 intel_runtime_pm_put(dev_priv); 2674 mutex_unlock(&dev->struct_mutex); 2675 2676 return 0; 2677 } 2678 2679 struct pipe_crc_info { 2680 const char *name; 2681 struct drm_device *dev; 2682 enum pipe pipe; 2683 }; 2684 2685 static int i915_dp_mst_info(struct seq_file *m, void *unused) 2686 { 2687 struct drm_info_node *node = (struct drm_info_node *) m->private; 2688 struct drm_device *dev = node->minor->dev; 2689 struct drm_encoder *encoder; 2690 struct intel_encoder *intel_encoder; 2691 struct intel_digital_port *intel_dig_port; 2692 drm_modeset_lock_all(dev); 2693 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 2694 intel_encoder = to_intel_encoder(encoder); 2695 if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT) 2696 continue; 2697 intel_dig_port = enc_to_dig_port(encoder); 2698 if (!intel_dig_port->dp.can_mst) 2699 continue; 2700 2701 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 2702 } 2703 drm_modeset_unlock_all(dev); 2704 return 0; 2705 } 2706 2707 static int i915_pipe_crc_open(struct inode *inode, struct file *filep) 2708 { 2709 struct pipe_crc_info *info = inode->i_private; 2710 struct drm_i915_private *dev_priv = info->dev->dev_private; 2711 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2712 2713 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) 2714 return -ENODEV; 2715 2716 spin_lock_irq(&pipe_crc->lock); 2717 2718 if (pipe_crc->opened) { 2719 spin_unlock_irq(&pipe_crc->lock); 2720 return -EBUSY; /* already open */ 2721 } 2722 2723 pipe_crc->opened = true; 2724 filep->private_data = inode->i_private; 2725 2726 spin_unlock_irq(&pipe_crc->lock); 2727 2728 return 0; 2729 } 2730 2731 static int i915_pipe_crc_release(struct inode *inode, struct file *filep) 2732 { 2733 struct pipe_crc_info *info = inode->i_private; 2734 struct drm_i915_private *dev_priv = info->dev->dev_private; 2735 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2736 2737 spin_lock_irq(&pipe_crc->lock); 2738 pipe_crc->opened = false; 2739 spin_unlock_irq(&pipe_crc->lock); 2740 2741 return 0; 2742 } 2743 2744 /* (6 fields, 8 chars each, space separated (5) + '\n') */ 2745 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) 2746 /* account for \'0' */ 2747 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) 2748 2749 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) 2750 { 2751 assert_spin_locked(&pipe_crc->lock); 2752 return CIRC_CNT(pipe_crc->head, pipe_crc->tail, 2753 
INTEL_PIPE_CRC_ENTRIES_NR); 2754 } 2755 2756 static ssize_t 2757 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, 2758 loff_t *pos) 2759 { 2760 struct pipe_crc_info *info = filep->private_data; 2761 struct drm_device *dev = info->dev; 2762 struct drm_i915_private *dev_priv = dev->dev_private; 2763 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2764 char buf[PIPE_CRC_BUFFER_LEN]; 2765 int head, tail, n_entries, n; 2766 ssize_t bytes_read; 2767 2768 /* 2769 * Don't allow user space to provide buffers not big enough to hold 2770 * a line of data. 2771 */ 2772 if (count < PIPE_CRC_LINE_LEN) 2773 return -EINVAL; 2774 2775 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) 2776 return 0; 2777 2778 /* nothing to read */ 2779 spin_lock_irq(&pipe_crc->lock); 2780 while (pipe_crc_data_count(pipe_crc) == 0) { 2781 int ret; 2782 2783 if (filep->f_flags & O_NONBLOCK) { 2784 spin_unlock_irq(&pipe_crc->lock); 2785 return -EAGAIN; 2786 } 2787 2788 ret = wait_event_interruptible_lock_irq(pipe_crc->wq, 2789 pipe_crc_data_count(pipe_crc), pipe_crc->lock); 2790 if (ret) { 2791 spin_unlock_irq(&pipe_crc->lock); 2792 return ret; 2793 } 2794 } 2795 2796 /* We now have one or more entries to read */ 2797 head = pipe_crc->head; 2798 tail = pipe_crc->tail; 2799 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR), 2800 count / PIPE_CRC_LINE_LEN); 2801 spin_unlock_irq(&pipe_crc->lock); 2802 2803 bytes_read = 0; 2804 n = 0; 2805 do { 2806 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail]; 2807 int ret; 2808 2809 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, 2810 "%8u %8x %8x %8x %8x %8x\n", 2811 entry->frame, entry->crc[0], 2812 entry->crc[1], entry->crc[2], 2813 entry->crc[3], entry->crc[4]); 2814 2815 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN, 2816 buf, PIPE_CRC_LINE_LEN); 2817 if (ret == PIPE_CRC_LINE_LEN) 2818 return -EFAULT; 2819 2820 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); 2821 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 2822 n++; 2823 } while (--n_entries); 2824 2825 spin_lock_irq(&pipe_crc->lock); 2826 pipe_crc->tail = tail; 2827 spin_unlock_irq(&pipe_crc->lock); 2828 2829 return bytes_read; 2830 } 2831 2832 static const struct file_operations i915_pipe_crc_fops = { 2833 .owner = THIS_MODULE, 2834 .open = i915_pipe_crc_open, 2835 .read = i915_pipe_crc_read, 2836 .release = i915_pipe_crc_release, 2837 }; 2838 2839 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { 2840 { 2841 .name = "i915_pipe_A_crc", 2842 .pipe = PIPE_A, 2843 }, 2844 { 2845 .name = "i915_pipe_B_crc", 2846 .pipe = PIPE_B, 2847 }, 2848 { 2849 .name = "i915_pipe_C_crc", 2850 .pipe = PIPE_C, 2851 }, 2852 }; 2853 2854 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, 2855 enum pipe pipe) 2856 { 2857 struct drm_device *dev = minor->dev; 2858 struct dentry *ent; 2859 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; 2860 2861 info->dev = dev; 2862 ent = debugfs_create_file(info->name, S_IRUGO, root, info, 2863 &i915_pipe_crc_fops); 2864 if (!ent) 2865 return -ENOMEM; 2866 2867 return drm_add_fake_info_node(minor, ent, info); 2868 } 2869 2870 static const char * const pipe_crc_sources[] = { 2871 "none", 2872 "plane1", 2873 "plane2", 2874 "pf", 2875 "pipe", 2876 "TV", 2877 "DP-B", 2878 "DP-C", 2879 "DP-D", 2880 "auto", 2881 }; 2882 2883 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) 2884 { 2885 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != 
INTEL_PIPE_CRC_SOURCE_MAX); 2886 return pipe_crc_sources[source]; 2887 } 2888 2889 static int display_crc_ctl_show(struct seq_file *m, void *data) 2890 { 2891 struct drm_device *dev = m->private; 2892 struct drm_i915_private *dev_priv = dev->dev_private; 2893 int i; 2894 2895 for (i = 0; i < I915_MAX_PIPES; i++) 2896 seq_printf(m, "%c %s\n", pipe_name(i), 2897 pipe_crc_source_name(dev_priv->pipe_crc[i].source)); 2898 2899 return 0; 2900 } 2901 2902 static int display_crc_ctl_open(struct inode *inode, struct file *file) 2903 { 2904 struct drm_device *dev = inode->i_private; 2905 2906 return single_open(file, display_crc_ctl_show, dev); 2907 } 2908 2909 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2910 uint32_t *val) 2911 { 2912 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2913 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2914 2915 switch (*source) { 2916 case INTEL_PIPE_CRC_SOURCE_PIPE: 2917 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; 2918 break; 2919 case INTEL_PIPE_CRC_SOURCE_NONE: 2920 *val = 0; 2921 break; 2922 default: 2923 return -EINVAL; 2924 } 2925 2926 return 0; 2927 } 2928 2929 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, 2930 enum intel_pipe_crc_source *source) 2931 { 2932 struct intel_encoder *encoder; 2933 struct intel_crtc *crtc; 2934 struct intel_digital_port *dig_port; 2935 int ret = 0; 2936 2937 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2938 2939 drm_modeset_lock_all(dev); 2940 for_each_intel_encoder(dev, encoder) { 2941 if (!encoder->base.crtc) 2942 continue; 2943 2944 crtc = to_intel_crtc(encoder->base.crtc); 2945 2946 if (crtc->pipe != pipe) 2947 continue; 2948 2949 switch (encoder->type) { 2950 case INTEL_OUTPUT_TVOUT: 2951 *source = INTEL_PIPE_CRC_SOURCE_TV; 2952 break; 2953 case INTEL_OUTPUT_DISPLAYPORT: 2954 case INTEL_OUTPUT_EDP: 2955 dig_port = enc_to_dig_port(&encoder->base); 2956 switch (dig_port->port) { 2957 case PORT_B: 2958 *source = INTEL_PIPE_CRC_SOURCE_DP_B; 2959 break; 2960 case PORT_C: 2961 *source = INTEL_PIPE_CRC_SOURCE_DP_C; 2962 break; 2963 case PORT_D: 2964 *source = INTEL_PIPE_CRC_SOURCE_DP_D; 2965 break; 2966 default: 2967 WARN(1, "nonexistent DP port %c\n", 2968 port_name(dig_port->port)); 2969 break; 2970 } 2971 break; 2972 } 2973 } 2974 drm_modeset_unlock_all(dev); 2975 2976 return ret; 2977 } 2978 2979 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, 2980 enum pipe pipe, 2981 enum intel_pipe_crc_source *source, 2982 uint32_t *val) 2983 { 2984 struct drm_i915_private *dev_priv = dev->dev_private; 2985 bool need_stable_symbols = false; 2986 2987 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 2988 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 2989 if (ret) 2990 return ret; 2991 } 2992 2993 switch (*source) { 2994 case INTEL_PIPE_CRC_SOURCE_PIPE: 2995 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; 2996 break; 2997 case INTEL_PIPE_CRC_SOURCE_DP_B: 2998 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; 2999 need_stable_symbols = true; 3000 break; 3001 case INTEL_PIPE_CRC_SOURCE_DP_C: 3002 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; 3003 need_stable_symbols = true; 3004 break; 3005 case INTEL_PIPE_CRC_SOURCE_NONE: 3006 *val = 0; 3007 break; 3008 default: 3009 return -EINVAL; 3010 } 3011 3012 /* 3013 * When the pipe CRC tap point is after the transcoders we need 3014 * to tweak symbol-level features to produce a deterministic series of 3015 * symbols for a given frame.
We need to reset those features only once 3016 * a frame (instead of every nth symbol): 3017 * - DC-balance: used to ensure a better clock recovery from the data 3018 * link (SDVO) 3019 * - DisplayPort scrambling: used for EMI reduction 3020 */ 3021 if (need_stable_symbols) { 3022 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3023 3024 tmp |= DC_BALANCE_RESET_VLV; 3025 if (pipe == PIPE_A) 3026 tmp |= PIPE_A_SCRAMBLE_RESET; 3027 else 3028 tmp |= PIPE_B_SCRAMBLE_RESET; 3029 3030 I915_WRITE(PORT_DFT2_G4X, tmp); 3031 } 3032 3033 return 0; 3034 } 3035 3036 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, 3037 enum pipe pipe, 3038 enum intel_pipe_crc_source *source, 3039 uint32_t *val) 3040 { 3041 struct drm_i915_private *dev_priv = dev->dev_private; 3042 bool need_stable_symbols = false; 3043 3044 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3045 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 3046 if (ret) 3047 return ret; 3048 } 3049 3050 switch (*source) { 3051 case INTEL_PIPE_CRC_SOURCE_PIPE: 3052 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; 3053 break; 3054 case INTEL_PIPE_CRC_SOURCE_TV: 3055 if (!SUPPORTS_TV(dev)) 3056 return -EINVAL; 3057 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; 3058 break; 3059 case INTEL_PIPE_CRC_SOURCE_DP_B: 3060 if (!IS_G4X(dev)) 3061 return -EINVAL; 3062 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; 3063 need_stable_symbols = true; 3064 break; 3065 case INTEL_PIPE_CRC_SOURCE_DP_C: 3066 if (!IS_G4X(dev)) 3067 return -EINVAL; 3068 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; 3069 need_stable_symbols = true; 3070 break; 3071 case INTEL_PIPE_CRC_SOURCE_DP_D: 3072 if (!IS_G4X(dev)) 3073 return -EINVAL; 3074 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; 3075 need_stable_symbols = true; 3076 break; 3077 case INTEL_PIPE_CRC_SOURCE_NONE: 3078 *val = 0; 3079 break; 3080 default: 3081 return -EINVAL; 3082 } 3083 3084 /* 3085 * When the pipe CRC tap point is after the transcoders we need 3086 * to tweak symbol-level features to produce a deterministic series of 3087 * symbols for a given frame. 
We need to reset those features only once 3088 * a frame (instead of every nth symbol): 3089 * - DC-balance: used to ensure a better clock recovery from the data 3090 * link (SDVO) 3091 * - DisplayPort scrambling: used for EMI reduction 3092 */ 3093 if (need_stable_symbols) { 3094 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3095 3096 WARN_ON(!IS_G4X(dev)); 3097 3098 I915_WRITE(PORT_DFT_I9XX, 3099 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); 3100 3101 if (pipe == PIPE_A) 3102 tmp |= PIPE_A_SCRAMBLE_RESET; 3103 else 3104 tmp |= PIPE_B_SCRAMBLE_RESET; 3105 3106 I915_WRITE(PORT_DFT2_G4X, tmp); 3107 } 3108 3109 return 0; 3110 } 3111 3112 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, 3113 enum pipe pipe) 3114 { 3115 struct drm_i915_private *dev_priv = dev->dev_private; 3116 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3117 3118 if (pipe == PIPE_A) 3119 tmp &= ~PIPE_A_SCRAMBLE_RESET; 3120 else 3121 tmp &= ~PIPE_B_SCRAMBLE_RESET; 3122 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) 3123 tmp &= ~DC_BALANCE_RESET_VLV; 3124 I915_WRITE(PORT_DFT2_G4X, tmp); 3125 3126 } 3127 3128 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, 3129 enum pipe pipe) 3130 { 3131 struct drm_i915_private *dev_priv = dev->dev_private; 3132 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3133 3134 if (pipe == PIPE_A) 3135 tmp &= ~PIPE_A_SCRAMBLE_RESET; 3136 else 3137 tmp &= ~PIPE_B_SCRAMBLE_RESET; 3138 I915_WRITE(PORT_DFT2_G4X, tmp); 3139 3140 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { 3141 I915_WRITE(PORT_DFT_I9XX, 3142 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); 3143 } 3144 } 3145 3146 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 3147 uint32_t *val) 3148 { 3149 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3150 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3151 3152 switch (*source) { 3153 case INTEL_PIPE_CRC_SOURCE_PLANE1: 3154 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; 3155 break; 3156 case INTEL_PIPE_CRC_SOURCE_PLANE2: 3157 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; 3158 break; 3159 case INTEL_PIPE_CRC_SOURCE_PIPE: 3160 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; 3161 break; 3162 case INTEL_PIPE_CRC_SOURCE_NONE: 3163 *val = 0; 3164 break; 3165 default: 3166 return -EINVAL; 3167 } 3168 3169 return 0; 3170 } 3171 3172 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev) 3173 { 3174 struct drm_i915_private *dev_priv = dev->dev_private; 3175 struct intel_crtc *crtc = 3176 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); 3177 3178 drm_modeset_lock_all(dev); 3179 /* 3180 * If we use the eDP transcoder we need to make sure that we don't 3181 * bypass the pfit, since otherwise the pipe CRC source won't work. Only 3182 * relevant on hsw with pipe A when using the always-on power well 3183 * routing. 
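 * The workaround below restarts the pipe with pch_pfit.force_thru set
 * and holds the panel fitter power domain for as long as the CRC
 * source is active, at the cost of a full modeset each way.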
3184 */ 3185 if (crtc->config.cpu_transcoder == TRANSCODER_EDP && 3186 !crtc->config.pch_pfit.enabled) { 3187 crtc->config.pch_pfit.force_thru = true; 3188 3189 intel_display_power_get(dev_priv, 3190 POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); 3191 3192 dev_priv->display.crtc_disable(&crtc->base); 3193 dev_priv->display.crtc_enable(&crtc->base); 3194 } 3195 drm_modeset_unlock_all(dev); 3196 } 3197 3198 static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev) 3199 { 3200 struct drm_i915_private *dev_priv = dev->dev_private; 3201 struct intel_crtc *crtc = 3202 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); 3203 3204 drm_modeset_lock_all(dev); 3205 /* 3206 * If we use the eDP transcoder we need to make sure that we don't 3207 * bypass the pfit, since otherwise the pipe CRC source won't work. Only 3208 * relevant on hsw with pipe A when using the always-on power well 3209 * routing. 3210 */ 3211 if (crtc->config.pch_pfit.force_thru) { 3212 crtc->config.pch_pfit.force_thru = false; 3213 3214 dev_priv->display.crtc_disable(&crtc->base); 3215 dev_priv->display.crtc_enable(&crtc->base); 3216 3217 intel_display_power_put(dev_priv, 3218 POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); 3219 } 3220 drm_modeset_unlock_all(dev); 3221 } 3222 3223 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev, 3224 enum pipe pipe, 3225 enum intel_pipe_crc_source *source, 3226 uint32_t *val) 3227 { 3228 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3229 *source = INTEL_PIPE_CRC_SOURCE_PF; 3230 3231 switch (*source) { 3232 case INTEL_PIPE_CRC_SOURCE_PLANE1: 3233 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; 3234 break; 3235 case INTEL_PIPE_CRC_SOURCE_PLANE2: 3236 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; 3237 break; 3238 case INTEL_PIPE_CRC_SOURCE_PF: 3239 if (IS_HASWELL(dev) && pipe == PIPE_A) 3240 hsw_trans_edp_pipe_A_crc_wa(dev); 3241 3242 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; 3243 break; 3244 case INTEL_PIPE_CRC_SOURCE_NONE: 3245 *val = 0; 3246 break; 3247 default: 3248 return -EINVAL; 3249 } 3250 3251 return 0; 3252 } 3253 3254 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, 3255 enum intel_pipe_crc_source source) 3256 { 3257 struct drm_i915_private *dev_priv = dev->dev_private; 3258 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3259 u32 val = 0; /* shut up gcc */ 3260 int ret; 3261 3262 if (pipe_crc->source == source) 3263 return 0; 3264 3265 /* forbid changing the source without going back to 'none' */ 3266 if (pipe_crc->source && source) 3267 return -EINVAL; 3268 3269 if (IS_GEN2(dev)) 3270 ret = i8xx_pipe_crc_ctl_reg(&source, &val); 3271 else if (INTEL_INFO(dev)->gen < 5) 3272 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); 3273 else if (IS_VALLEYVIEW(dev)) 3274 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); 3275 else if (IS_GEN5(dev) || IS_GEN6(dev)) 3276 ret = ilk_pipe_crc_ctl_reg(&source, &val); 3277 else 3278 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); 3279 3280 if (ret != 0) 3281 return ret; 3282 3283 /* none -> real source transition */ 3284 if (source) { 3285 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", 3286 pipe_name(pipe), pipe_crc_source_name(source)); 3287 3288 pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) * 3289 INTEL_PIPE_CRC_ENTRIES_NR, 3290 GFP_KERNEL); 3291 if (!pipe_crc->entries) 3292 return -ENOMEM; 3293 3294 spin_lock_irq(&pipe_crc->lock); 3295 pipe_crc->head = 0; 3296 pipe_crc->tail = 0; 3297 spin_unlock_irq(&pipe_crc->lock); 3298 } 3299 3300 pipe_crc->source = source; 
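/* Commit the new source to the hardware; the posting read flushes the
 * write before we potentially wait for a final vblank and free the
 * entries below. */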
3301 3302 I915_WRITE(PIPE_CRC_CTL(pipe), val); 3303 POSTING_READ(PIPE_CRC_CTL(pipe)); 3304 3305 /* real source -> none transition */ 3306 if (source == INTEL_PIPE_CRC_SOURCE_NONE) { 3307 struct intel_pipe_crc_entry *entries; 3308 struct intel_crtc *crtc = 3309 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 3310 3311 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", 3312 pipe_name(pipe)); 3313 3314 drm_modeset_lock(&crtc->base.mutex, NULL); 3315 if (crtc->active) 3316 intel_wait_for_vblank(dev, pipe); 3317 drm_modeset_unlock(&crtc->base.mutex); 3318 3319 spin_lock_irq(&pipe_crc->lock); 3320 entries = pipe_crc->entries; 3321 pipe_crc->entries = NULL; 3322 spin_unlock_irq(&pipe_crc->lock); 3323 3324 kfree(entries); 3325 3326 if (IS_G4X(dev)) 3327 g4x_undo_pipe_scramble_reset(dev, pipe); 3328 else if (IS_VALLEYVIEW(dev)) 3329 vlv_undo_pipe_scramble_reset(dev, pipe); 3330 else if (IS_HASWELL(dev) && pipe == PIPE_A) 3331 hsw_undo_trans_edp_pipe_A_crc_wa(dev); 3332 } 3333 3334 return 0; 3335 } 3336 3337 /* 3338 * Parse pipe CRC command strings: 3339 * command: wsp* object wsp+ name wsp+ source wsp* 3340 * object: 'pipe' 3341 * name: (A | B | C) 3342 * source: (none | plane1 | plane2 | pf) 3343 * wsp: (#0x20 | #0x9 | #0xA)+ 3344 * 3345 * eg.: 3346 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A 3347 * "pipe A none" -> Stop CRC 3348 */ 3349 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) 3350 { 3351 int n_words = 0; 3352 3353 while (*buf) { 3354 char *end; 3355 3356 /* skip leading white space */ 3357 buf = skip_spaces(buf); 3358 if (!*buf) 3359 break; /* end of buffer */ 3360 3361 /* find end of word */ 3362 for (end = buf; *end && !isspace(*end); end++) 3363 ; 3364 3365 if (n_words == max_words) { 3366 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", 3367 max_words); 3368 return -EINVAL; /* ran out of words[] before bytes */ 3369 } 3370 3371 if (*end) 3372 *end++ = '\0'; 3373 words[n_words++] = buf; 3374 buf = end; 3375 } 3376 3377 return n_words; 3378 } 3379 3380 enum intel_pipe_crc_object { 3381 PIPE_CRC_OBJECT_PIPE, 3382 }; 3383 3384 static const char * const pipe_crc_objects[] = { 3385 "pipe", 3386 }; 3387 3388 static int 3389 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) 3390 { 3391 int i; 3392 3393 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) 3394 if (!strcmp(buf, pipe_crc_objects[i])) { 3395 *o = i; 3396 return 0; 3397 } 3398 3399 return -EINVAL; 3400 } 3401 3402 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) 3403 { 3404 const char name = buf[0]; 3405 3406 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) 3407 return -EINVAL; 3408 3409 *pipe = name - 'A'; 3410 3411 return 0; 3412 } 3413 3414 static int 3415 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) 3416 { 3417 int i; 3418 3419 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) 3420 if (!strcmp(buf, pipe_crc_sources[i])) { 3421 *s = i; 3422 return 0; 3423 } 3424 3425 return -EINVAL; 3426 } 3427 3428 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len) 3429 { 3430 #define N_WORDS 3 3431 int n_words; 3432 char *words[N_WORDS]; 3433 enum pipe pipe; 3434 enum intel_pipe_crc_object object; 3435 enum intel_pipe_crc_source source; 3436 3437 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); 3438 if (n_words != N_WORDS) { 3439 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", 3440 N_WORDS); 3441 return -EINVAL; 3442 } 3443 3444 if 
(display_crc_ctl_parse_object(words[0], &object) < 0) { 3445 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); 3446 return -EINVAL; 3447 } 3448 3449 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { 3450 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); 3451 return -EINVAL; 3452 } 3453 3454 if (display_crc_ctl_parse_source(words[2], &source) < 0) { 3455 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); 3456 return -EINVAL; 3457 } 3458 3459 return pipe_crc_set_source(dev, pipe, source); 3460 } 3461 3462 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, 3463 size_t len, loff_t *offp) 3464 { 3465 struct seq_file *m = file->private_data; 3466 struct drm_device *dev = m->private; 3467 char *tmpbuf; 3468 int ret; 3469 3470 if (len == 0) 3471 return 0; 3472 3473 if (len > PAGE_SIZE - 1) { 3474 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", 3475 PAGE_SIZE); 3476 return -E2BIG; 3477 } 3478 3479 tmpbuf = kmalloc(len + 1, GFP_KERNEL); 3480 if (!tmpbuf) 3481 return -ENOMEM; 3482 3483 if (copy_from_user(tmpbuf, ubuf, len)) { 3484 ret = -EFAULT; 3485 goto out; 3486 } 3487 tmpbuf[len] = '\0'; 3488 3489 ret = display_crc_ctl_parse(dev, tmpbuf, len); 3490 3491 out: 3492 kfree(tmpbuf); 3493 if (ret < 0) 3494 return ret; 3495 3496 *offp += len; 3497 return len; 3498 } 3499 3500 static const struct file_operations i915_display_crc_ctl_fops = { 3501 .owner = THIS_MODULE, 3502 .open = display_crc_ctl_open, 3503 .read = seq_read, 3504 .llseek = seq_lseek, 3505 .release = single_release, 3506 .write = display_crc_ctl_write 3507 }; 3508 3509 static void wm_latency_show(struct seq_file *m, const uint16_t wm[5]) 3510 { 3511 struct drm_device *dev = m->private; 3512 int num_levels = ilk_wm_max_level(dev) + 1; 3513 int level; 3514 3515 drm_modeset_lock_all(dev); 3516 3517 for (level = 0; level < num_levels; level++) { 3518 unsigned int latency = wm[level]; 3519 3520 /* WM1+ latency values in 0.5us units */ 3521 if (level > 0) 3522 latency *= 5; 3523 3524 seq_printf(m, "WM%d %u (%u.%u usec)\n", 3525 level, wm[level], 3526 latency / 10, latency % 10); 3527 } 3528 3529 drm_modeset_unlock_all(dev); 3530 } 3531 3532 static int pri_wm_latency_show(struct seq_file *m, void *data) 3533 { 3534 struct drm_device *dev = m->private; 3535 3536 wm_latency_show(m, to_i915(dev)->wm.pri_latency); 3537 3538 return 0; 3539 } 3540 3541 static int spr_wm_latency_show(struct seq_file *m, void *data) 3542 { 3543 struct drm_device *dev = m->private; 3544 3545 wm_latency_show(m, to_i915(dev)->wm.spr_latency); 3546 3547 return 0; 3548 } 3549 3550 static int cur_wm_latency_show(struct seq_file *m, void *data) 3551 { 3552 struct drm_device *dev = m->private; 3553 3554 wm_latency_show(m, to_i915(dev)->wm.cur_latency); 3555 3556 return 0; 3557 } 3558 3559 static int pri_wm_latency_open(struct inode *inode, struct file *file) 3560 { 3561 struct drm_device *dev = inode->i_private; 3562 3563 if (HAS_GMCH_DISPLAY(dev)) 3564 return -ENODEV; 3565 3566 return single_open(file, pri_wm_latency_show, dev); 3567 } 3568 3569 static int spr_wm_latency_open(struct inode *inode, struct file *file) 3570 { 3571 struct drm_device *dev = inode->i_private; 3572 3573 if (HAS_GMCH_DISPLAY(dev)) 3574 return -ENODEV; 3575 3576 return single_open(file, spr_wm_latency_show, dev); 3577 } 3578 3579 static int cur_wm_latency_open(struct inode *inode, struct file *file) 3580 { 3581 struct drm_device *dev = inode->i_private; 3582 3583 if (HAS_GMCH_DISPLAY(dev)) 3584 return -ENODEV; 3585 3586 return single_open(file, 
cur_wm_latency_show, dev); 3587 } 3588 3589 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 3590 size_t len, loff_t *offp, uint16_t wm[5]) 3591 { 3592 struct seq_file *m = file->private_data; 3593 struct drm_device *dev = m->private; 3594 uint16_t new[5] = { 0 }; 3595 int num_levels = ilk_wm_max_level(dev) + 1; 3596 int level; 3597 int ret; 3598 char tmp[32]; 3599 3600 if (len >= sizeof(tmp)) 3601 return -EINVAL; 3602 3603 if (copy_from_user(tmp, ubuf, len)) 3604 return -EFAULT; 3605 3606 tmp[len] = '\0'; 3607 3608 ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]); 3609 if (ret != num_levels) 3610 return -EINVAL; 3611 3612 drm_modeset_lock_all(dev); 3613 3614 for (level = 0; level < num_levels; level++) 3615 wm[level] = new[level]; 3616 3617 drm_modeset_unlock_all(dev); 3618 3619 return len; 3620 } 3621 3622 3623 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 3624 size_t len, loff_t *offp) 3625 { 3626 struct seq_file *m = file->private_data; 3627 struct drm_device *dev = m->private; 3628 3629 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency); 3630 } 3631 3632 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 3633 size_t len, loff_t *offp) 3634 { 3635 struct seq_file *m = file->private_data; 3636 struct drm_device *dev = m->private; 3637 3638 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency); 3639 } 3640 3641 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 3642 size_t len, loff_t *offp) 3643 { 3644 struct seq_file *m = file->private_data; 3645 struct drm_device *dev = m->private; 3646 3647 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency); 3648 } 3649 3650 static const struct file_operations i915_pri_wm_latency_fops = { 3651 .owner = THIS_MODULE, 3652 .open = pri_wm_latency_open, 3653 .read = seq_read, 3654 .llseek = seq_lseek, 3655 .release = single_release, 3656 .write = pri_wm_latency_write 3657 }; 3658 3659 static const struct file_operations i915_spr_wm_latency_fops = { 3660 .owner = THIS_MODULE, 3661 .open = spr_wm_latency_open, 3662 .read = seq_read, 3663 .llseek = seq_lseek, 3664 .release = single_release, 3665 .write = spr_wm_latency_write 3666 }; 3667 3668 static const struct file_operations i915_cur_wm_latency_fops = { 3669 .owner = THIS_MODULE, 3670 .open = cur_wm_latency_open, 3671 .read = seq_read, 3672 .llseek = seq_lseek, 3673 .release = single_release, 3674 .write = cur_wm_latency_write 3675 }; 3676 3677 static int 3678 i915_wedged_get(void *data, u64 *val) 3679 { 3680 struct drm_device *dev = data; 3681 struct drm_i915_private *dev_priv = dev->dev_private; 3682 3683 *val = atomic_read(&dev_priv->gpu_error.reset_counter); 3684 3685 return 0; 3686 } 3687 3688 static int 3689 i915_wedged_set(void *data, u64 val) 3690 { 3691 struct drm_device *dev = data; 3692 struct drm_i915_private *dev_priv = dev->dev_private; 3693 3694 intel_runtime_pm_get(dev_priv); 3695 3696 i915_handle_error(dev, val, 3697 "Manually setting wedged to %llu", val); 3698 3699 intel_runtime_pm_put(dev_priv); 3700 3701 return 0; 3702 } 3703 3704 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 3705 i915_wedged_get, i915_wedged_set, 3706 "%llu\n"); 3707 3708 static int 3709 i915_ring_stop_get(void *data, u64 *val) 3710 { 3711 struct drm_device *dev = data; 3712 struct drm_i915_private *dev_priv = dev->dev_private; 3713 3714 *val = dev_priv->gpu_error.stop_rings; 3715 3716 return 
0; 3717 } 3718 3719 static int 3720 i915_ring_stop_set(void *data, u64 val) 3721 { 3722 struct drm_device *dev = data; 3723 struct drm_i915_private *dev_priv = dev->dev_private; 3724 int ret; 3725 3726 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 3727 3728 ret = mutex_lock_interruptible(&dev->struct_mutex); 3729 if (ret) 3730 return ret; 3731 3732 dev_priv->gpu_error.stop_rings = val; 3733 mutex_unlock(&dev->struct_mutex); 3734 3735 return 0; 3736 } 3737 3738 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 3739 i915_ring_stop_get, i915_ring_stop_set, 3740 "0x%08llx\n"); 3741 3742 static int 3743 i915_ring_missed_irq_get(void *data, u64 *val) 3744 { 3745 struct drm_device *dev = data; 3746 struct drm_i915_private *dev_priv = dev->dev_private; 3747 3748 *val = dev_priv->gpu_error.missed_irq_rings; 3749 return 0; 3750 } 3751 3752 static int 3753 i915_ring_missed_irq_set(void *data, u64 val) 3754 { 3755 struct drm_device *dev = data; 3756 struct drm_i915_private *dev_priv = dev->dev_private; 3757 int ret; 3758 3759 /* Lock against concurrent debugfs callers */ 3760 ret = mutex_lock_interruptible(&dev->struct_mutex); 3761 if (ret) 3762 return ret; 3763 dev_priv->gpu_error.missed_irq_rings = val; 3764 mutex_unlock(&dev->struct_mutex); 3765 3766 return 0; 3767 } 3768 3769 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 3770 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 3771 "0x%08llx\n"); 3772 3773 static int 3774 i915_ring_test_irq_get(void *data, u64 *val) 3775 { 3776 struct drm_device *dev = data; 3777 struct drm_i915_private *dev_priv = dev->dev_private; 3778 3779 *val = dev_priv->gpu_error.test_irq_rings; 3780 3781 return 0; 3782 } 3783 3784 static int 3785 i915_ring_test_irq_set(void *data, u64 val) 3786 { 3787 struct drm_device *dev = data; 3788 struct drm_i915_private *dev_priv = dev->dev_private; 3789 int ret; 3790 3791 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 3792 3793 /* Lock against concurrent debugfs callers */ 3794 ret = mutex_lock_interruptible(&dev->struct_mutex); 3795 if (ret) 3796 return ret; 3797 3798 dev_priv->gpu_error.test_irq_rings = val; 3799 mutex_unlock(&dev->struct_mutex); 3800 3801 return 0; 3802 } 3803 3804 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 3805 i915_ring_test_irq_get, i915_ring_test_irq_set, 3806 "0x%08llx\n"); 3807 3808 #define DROP_UNBOUND 0x1 3809 #define DROP_BOUND 0x2 3810 #define DROP_RETIRE 0x4 3811 #define DROP_ACTIVE 0x8 3812 #define DROP_ALL (DROP_UNBOUND | \ 3813 DROP_BOUND | \ 3814 DROP_RETIRE | \ 3815 DROP_ACTIVE) 3816 static int 3817 i915_drop_caches_get(void *data, u64 *val) 3818 { 3819 *val = DROP_ALL; 3820 3821 return 0; 3822 } 3823 3824 static int 3825 i915_drop_caches_set(void *data, u64 val) 3826 { 3827 struct drm_device *dev = data; 3828 struct drm_i915_private *dev_priv = dev->dev_private; 3829 int ret; 3830 3831 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 3832 3833 /* No need to check and wait for gpu resets, only libdrm auto-restarts 3834 * on ioctls on -EAGAIN. 
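 * Example: writing DROP_ALL (0xf) to the drop caches debugfs file
 * drops everything, e.g.
 *   echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 * (the exact path depends on the debugfs mount point and card minor).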
*/ 3835 ret = mutex_lock_interruptible(&dev->struct_mutex); 3836 if (ret) 3837 return ret; 3838 3839 if (val & DROP_ACTIVE) { 3840 ret = i915_gpu_idle(dev); 3841 if (ret) 3842 goto unlock; 3843 } 3844 3845 if (val & (DROP_RETIRE | DROP_ACTIVE)) 3846 i915_gem_retire_requests(dev); 3847 3848 if (val & DROP_BOUND) 3849 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); 3850 3851 if (val & DROP_UNBOUND) 3852 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND); 3853 3854 unlock: 3855 mutex_unlock(&dev->struct_mutex); 3856 3857 return ret; 3858 } 3859 3860 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 3861 i915_drop_caches_get, i915_drop_caches_set, 3862 "0x%08llx\n"); 3863 3864 static int 3865 i915_max_freq_get(void *data, u64 *val) 3866 { 3867 struct drm_device *dev = data; 3868 struct drm_i915_private *dev_priv = dev->dev_private; 3869 int ret; 3870 3871 if (INTEL_INFO(dev)->gen < 6) 3872 return -ENODEV; 3873 3874 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3875 3876 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3877 if (ret) 3878 return ret; 3879 3880 if (IS_VALLEYVIEW(dev)) 3881 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); 3882 else 3883 *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER; 3884 mutex_unlock(&dev_priv->rps.hw_lock); 3885 3886 return 0; 3887 } 3888 3889 static int 3890 i915_max_freq_set(void *data, u64 val) 3891 { 3892 struct drm_device *dev = data; 3893 struct drm_i915_private *dev_priv = dev->dev_private; 3894 u32 rp_state_cap, hw_max, hw_min; 3895 int ret; 3896 3897 if (INTEL_INFO(dev)->gen < 6) 3898 return -ENODEV; 3899 3900 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3901 3902 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 3903 3904 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3905 if (ret) 3906 return ret; 3907 3908 /* 3909 * Turbo will still be enabled, but won't go above the set value. 
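 * The value written is taken in MHz and converted below into the
 * hardware's own units: a PLL opcode via vlv_freq_opcode() on vlv,
 * and a GT_FREQUENCY_MULTIPLIER ratio everywhere else.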
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	else
		*val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rp_state_cap, hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);

		hw_max = dev_priv->rps.max_freq;
		hw_min = dev_priv->rps.min_freq;
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.max_freq;
		hw_min = (rp_state_cap >> 16) & 0xff;
	}

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
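/*
 * Example (illustrative): clamp the GPU turbo ceiling to 600 MHz. Values
 * are exchanged in MHz and must stay within the hardware limits and above
 * the current minimum softlimit; i915_min_freq (below) works the same way
 * for the floor:
 *
 *   # echo 600 > /sys/kernel/debug/dri/0/i915_max_freq
 *   # cat /sys/kernel/debug/dri/0/i915_max_freq
 */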
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	else
		*val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rp_state_cap, hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);

		hw_max = dev_priv->rps.max_freq;
		hw_min = dev_priv->rps.min_freq;
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.max_freq;
		hw_min = (rp_state_cap >> 16) & 0xff;
	}

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
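/*
 * Example (illustrative; path varies): userspace holds a forcewake
 * reference for as long as it keeps i915_forcewake_user open, e.g. while
 * sampling GT registers:
 *
 *   # exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *   ... read registers without the GT powering down ...
 *   # exec 3<&-
 */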
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info, 0},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_execlists", i915_execlists, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_pc8_status", i915_pc8_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
};
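/*
 * Note: the read-only nodes in i915_debugfs_list are registered through
 * drm_debugfs_create_files(), while the writable i915_debugfs_files
 * entries each get their own dentry via i915_debugfs_create() so they can
 * carry custom file_operations.
 */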
void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}
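/*
 * Cleanup note: i915_debugfs_create() and i915_forcewake_create() register
 * each file with its file_operations pointer as the fake info-node key (via
 * drm_add_fake_info_node()), which is why i915_debugfs_cleanup() above casts
 * those same fops pointers to struct drm_info_list when removing the nodes.
 */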