1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/circ_buf.h> 31 #include <linux/ctype.h> 32 #include <linux/debugfs.h> 33 #include <linux/slab.h> 34 #include <linux/export.h> 35 #include <linux/list_sort.h> 36 #include <asm/msr-index.h> 37 #include <drm/drmP.h> 38 #include "intel_drv.h" 39 #include "intel_ringbuffer.h" 40 #include <drm/i915_drm.h> 41 #include "i915_drv.h" 42 43 #if defined(CONFIG_DEBUG_FS) 44 45 enum { 46 ACTIVE_LIST, 47 INACTIVE_LIST, 48 PINNED_LIST, 49 }; 50 51 static const char *yesno(int v) 52 { 53 return v ? "yes" : "no"; 54 } 55 56 /* As the drm_debugfs_init() routines are called before dev->dev_private is 57 * allocated we need to hook into the minor for release. */ 58 static int 59 drm_add_fake_info_node(struct drm_minor *minor, 60 struct dentry *ent, 61 const void *key) 62 { 63 struct drm_info_node *node; 64 65 node = kmalloc(sizeof(*node), GFP_KERNEL); 66 if (node == NULL) { 67 debugfs_remove(ent); 68 return -ENOMEM; 69 } 70 71 node->minor = minor; 72 node->dent = ent; 73 node->info_ent = (void *) key; 74 75 mutex_lock(&minor->debugfs_lock); 76 list_add(&node->list, &minor->debugfs_list); 77 mutex_unlock(&minor->debugfs_lock); 78 79 return 0; 80 } 81 82 static int i915_capabilities(struct seq_file *m, void *data) 83 { 84 struct drm_info_node *node = (struct drm_info_node *) m->private; 85 struct drm_device *dev = node->minor->dev; 86 const struct intel_device_info *info = INTEL_INFO(dev); 87 88 seq_printf(m, "gen: %d\n", info->gen); 89 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 90 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 91 #define SEP_SEMICOLON ; 92 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON); 93 #undef PRINT_FLAG 94 #undef SEP_SEMICOLON 95 96 return 0; 97 } 98 99 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 100 { 101 if (obj->user_pin_count > 0) 102 return "P"; 103 else if (obj->pin_count > 0) 104 return "p"; 105 else 106 return " "; 107 } 108 109 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 110 { 111 switch (obj->tiling_mode) { 112 default: 113 case I915_TILING_NONE: return " "; 114 case I915_TILING_X: return "X"; 115 case I915_TILING_Y: return "Y"; 116 } 117 } 118 119 static inline const char *get_global_flag(struct drm_i915_gem_object *obj) 120 { 121 return obj->has_global_gtt_mapping ? "g" : " "; 122 } 123 124 static void 125 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 126 { 127 struct i915_vma *vma; 128 seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s", 129 &obj->base, 130 get_pin_flag(obj), 131 get_tiling_flag(obj), 132 get_global_flag(obj), 133 obj->base.size / 1024, 134 obj->base.read_domains, 135 obj->base.write_domain, 136 obj->last_read_seqno, 137 obj->last_write_seqno, 138 obj->last_fenced_seqno, 139 i915_cache_level_str(obj->cache_level), 140 obj->dirty ? " dirty" : "", 141 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 142 if (obj->base.name) 143 seq_printf(m, " (name: %d)", obj->base.name); 144 if (obj->pin_count) 145 seq_printf(m, " (pinned x %d)", obj->pin_count); 146 if (obj->pin_display) 147 seq_printf(m, " (display)"); 148 if (obj->fence_reg != I915_FENCE_REG_NONE) 149 seq_printf(m, " (fence: %d)", obj->fence_reg); 150 list_for_each_entry(vma, &obj->vma_list, vma_link) { 151 if (!i915_is_ggtt(vma->vm)) 152 seq_puts(m, " (pp"); 153 else 154 seq_puts(m, " (g"); 155 seq_printf(m, "gtt offset: %08lx, size: %08lx)", 156 vma->node.start, vma->node.size); 157 } 158 if (obj->stolen) 159 seq_printf(m, " (stolen: %08lx)", obj->stolen->start); 160 if (obj->pin_mappable || obj->fault_mappable) { 161 char s[3], *t = s; 162 if (obj->pin_mappable) 163 *t++ = 'p'; 164 if (obj->fault_mappable) 165 *t++ = 'f'; 166 *t = '\0'; 167 seq_printf(m, " (%s mappable)", s); 168 } 169 if (obj->ring != NULL) 170 seq_printf(m, " (%s)", obj->ring->name); 171 } 172 173 static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx) 174 { 175 seq_putc(m, ctx->is_initialized ? 'I' : 'i'); 176 seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 177 seq_putc(m, ' '); 178 } 179 180 static int i915_gem_object_list_info(struct seq_file *m, void *data) 181 { 182 struct drm_info_node *node = (struct drm_info_node *) m->private; 183 uintptr_t list = (uintptr_t) node->info_ent->data; 184 struct list_head *head; 185 struct drm_device *dev = node->minor->dev; 186 struct drm_i915_private *dev_priv = dev->dev_private; 187 struct i915_address_space *vm = &dev_priv->gtt.base; 188 struct i915_vma *vma; 189 size_t total_obj_size, total_gtt_size; 190 int count, ret; 191 192 ret = mutex_lock_interruptible(&dev->struct_mutex); 193 if (ret) 194 return ret; 195 196 /* FIXME: the user of this interface might want more than just GGTT */ 197 switch (list) { 198 case ACTIVE_LIST: 199 seq_puts(m, "Active:\n"); 200 head = &vm->active_list; 201 break; 202 case INACTIVE_LIST: 203 seq_puts(m, "Inactive:\n"); 204 head = &vm->inactive_list; 205 break; 206 default: 207 mutex_unlock(&dev->struct_mutex); 208 return -EINVAL; 209 } 210 211 total_obj_size = total_gtt_size = count = 0; 212 list_for_each_entry(vma, head, mm_list) { 213 seq_printf(m, " "); 214 describe_obj(m, vma->obj); 215 seq_printf(m, "\n"); 216 total_obj_size += vma->obj->base.size; 217 total_gtt_size += vma->node.size; 218 count++; 219 } 220 mutex_unlock(&dev->struct_mutex); 221 222 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 223 count, total_obj_size, total_gtt_size); 224 return 0; 225 } 226 227 static int obj_rank_by_stolen(void *priv, 228 struct list_head *A, struct list_head *B) 229 { 230 struct drm_i915_gem_object *a = 231 container_of(A, struct drm_i915_gem_object, obj_exec_link); 232 struct drm_i915_gem_object *b = 233 container_of(B, struct drm_i915_gem_object, obj_exec_link); 234 235 return a->stolen->start - b->stolen->start; 236 } 237 238 static int i915_gem_stolen_list_info(struct seq_file *m, void *data) 239 { 240 struct drm_info_node *node = (struct drm_info_node *) m->private; 241 struct drm_device *dev = node->minor->dev; 242 struct drm_i915_private *dev_priv = dev->dev_private; 243 struct drm_i915_gem_object *obj; 244 size_t total_obj_size, total_gtt_size; 245 LIST_HEAD(stolen); 246 int count, ret; 247 248 ret = mutex_lock_interruptible(&dev->struct_mutex); 249 if (ret) 250 return ret; 251 252 total_obj_size = total_gtt_size = count = 0; 253 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 254 if (obj->stolen == NULL) 255 continue; 256 257 list_add(&obj->obj_exec_link, &stolen); 258 259 total_obj_size += obj->base.size; 260 total_gtt_size += i915_gem_obj_ggtt_size(obj); 261 count++; 262 } 263 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 264 if (obj->stolen == NULL) 265 continue; 266 267 list_add(&obj->obj_exec_link, &stolen); 268 269 total_obj_size += obj->base.size; 270 count++; 271 } 272 list_sort(NULL, &stolen, obj_rank_by_stolen); 273 seq_puts(m, "Stolen:\n"); 274 while (!list_empty(&stolen)) { 275 obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link); 276 seq_puts(m, " "); 277 describe_obj(m, obj); 278 seq_putc(m, '\n'); 279 list_del_init(&obj->obj_exec_link); 280 } 281 mutex_unlock(&dev->struct_mutex); 282 283 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 284 count, total_obj_size, total_gtt_size); 285 return 0; 286 } 287 288 #define count_objects(list, member) do { \ 289 list_for_each_entry(obj, list, member) { \ 290 size += i915_gem_obj_ggtt_size(obj); \ 291 ++count; \ 292 if (obj->map_and_fenceable) { \ 293 mappable_size += i915_gem_obj_ggtt_size(obj); \ 294 ++mappable_count; \ 295 } \ 296 } \ 297 } while (0) 298 299 struct file_stats { 300 int count; 301 size_t total, active, inactive, unbound; 302 }; 303 304 static int per_file_stats(int id, void *ptr, void *data) 305 { 306 struct drm_i915_gem_object *obj = ptr; 307 struct file_stats *stats = data; 308 309 stats->count++; 310 stats->total += obj->base.size; 311 312 if (i915_gem_obj_ggtt_bound(obj)) { 313 if (!list_empty(&obj->ring_list)) 314 stats->active += obj->base.size; 315 else 316 stats->inactive += obj->base.size; 317 } else { 318 if (!list_empty(&obj->global_list)) 319 stats->unbound += obj->base.size; 320 } 321 322 return 0; 323 } 324 325 #define count_vmas(list, member) do { \ 326 list_for_each_entry(vma, list, member) { \ 327 size += i915_gem_obj_ggtt_size(vma->obj); \ 328 ++count; \ 329 if (vma->obj->map_and_fenceable) { \ 330 mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ 331 ++mappable_count; \ 332 } \ 333 } \ 334 } while (0) 335 336 static int i915_gem_object_info(struct seq_file *m, void* data) 337 { 338 struct drm_info_node *node = (struct drm_info_node *) m->private; 339 struct drm_device *dev = node->minor->dev; 340 struct drm_i915_private *dev_priv = dev->dev_private; 341 u32 count, mappable_count, purgeable_count; 342 size_t size, mappable_size, purgeable_size; 343 struct drm_i915_gem_object *obj; 344 struct i915_address_space *vm = &dev_priv->gtt.base; 345 struct drm_file *file; 346 struct i915_vma *vma; 347 int ret; 348 349 ret = mutex_lock_interruptible(&dev->struct_mutex); 350 if (ret) 351 return ret; 352 353 seq_printf(m, "%u objects, %zu bytes\n", 354 dev_priv->mm.object_count, 355 dev_priv->mm.object_memory); 356 357 size = count = mappable_size = mappable_count = 0; 358 count_objects(&dev_priv->mm.bound_list, global_list); 359 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 360 count, mappable_count, size, mappable_size); 361 362 size = count = mappable_size = mappable_count = 0; 363 count_vmas(&vm->active_list, mm_list); 364 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 365 count, mappable_count, size, mappable_size); 366 367 size = count = mappable_size = mappable_count = 0; 368 count_vmas(&vm->inactive_list, mm_list); 369 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 370 count, mappable_count, size, mappable_size); 371 372 size = count = purgeable_size = purgeable_count = 0; 373 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 374 size += obj->base.size, ++count; 375 if (obj->madv == I915_MADV_DONTNEED) 376 purgeable_size += obj->base.size, ++purgeable_count; 377 } 378 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); 379 380 size = count = mappable_size = mappable_count = 0; 381 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 382 if (obj->fault_mappable) { 383 size += i915_gem_obj_ggtt_size(obj); 384 ++count; 385 } 386 if (obj->pin_mappable) { 387 mappable_size += i915_gem_obj_ggtt_size(obj); 388 ++mappable_count; 389 } 390 if (obj->madv == I915_MADV_DONTNEED) { 391 purgeable_size += obj->base.size; 392 ++purgeable_count; 393 } 394 } 395 seq_printf(m, "%u purgeable objects, %zu bytes\n", 396 purgeable_count, purgeable_size); 397 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 398 mappable_count, mappable_size); 399 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 400 count, size); 401 402 seq_printf(m, "%zu [%lu] gtt total\n", 403 dev_priv->gtt.base.total, 404 dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); 405 406 seq_putc(m, '\n'); 407 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 408 struct file_stats stats; 409 410 memset(&stats, 0, sizeof(stats)); 411 idr_for_each(&file->object_idr, per_file_stats, &stats); 412 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n", 413 get_pid_task(file->pid, PIDTYPE_PID)->comm, 414 stats.count, 415 stats.total, 416 stats.active, 417 stats.inactive, 418 stats.unbound); 419 } 420 421 mutex_unlock(&dev->struct_mutex); 422 423 return 0; 424 } 425 426 static int i915_gem_gtt_info(struct seq_file *m, void *data) 427 { 428 struct drm_info_node *node = (struct drm_info_node *) m->private; 429 struct drm_device *dev = node->minor->dev; 430 uintptr_t list = (uintptr_t) node->info_ent->data; 431 struct drm_i915_private *dev_priv = dev->dev_private; 432 struct drm_i915_gem_object *obj; 433 size_t total_obj_size, total_gtt_size; 434 int count, ret; 435 436 ret = mutex_lock_interruptible(&dev->struct_mutex); 437 if (ret) 438 return ret; 439 440 total_obj_size = total_gtt_size = count = 0; 441 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 442 if (list == PINNED_LIST && obj->pin_count == 0) 443 continue; 444 445 seq_puts(m, " "); 446 describe_obj(m, obj); 447 seq_putc(m, '\n'); 448 total_obj_size += obj->base.size; 449 total_gtt_size += i915_gem_obj_ggtt_size(obj); 450 count++; 451 } 452 453 mutex_unlock(&dev->struct_mutex); 454 455 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 456 count, total_obj_size, total_gtt_size); 457 458 return 0; 459 } 460 461 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 462 { 463 struct drm_info_node *node = (struct drm_info_node *) m->private; 464 struct drm_device *dev = node->minor->dev; 465 unsigned long flags; 466 struct intel_crtc *crtc; 467 468 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 469 const char pipe = pipe_name(crtc->pipe); 470 const char plane = plane_name(crtc->plane); 471 struct intel_unpin_work *work; 472 473 spin_lock_irqsave(&dev->event_lock, flags); 474 work = crtc->unpin_work; 475 if (work == NULL) { 476 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 477 pipe, plane); 478 } else { 479 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 480 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 481 pipe, plane); 482 } else { 483 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 484 pipe, plane); 485 } 486 if (work->enable_stall_check) 487 seq_puts(m, "Stall check enabled, "); 488 else 489 seq_puts(m, "Stall check waiting for page flip ioctl, "); 490 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 491 492 if (work->old_fb_obj) { 493 struct drm_i915_gem_object *obj = work->old_fb_obj; 494 if (obj) 495 seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n", 496 i915_gem_obj_ggtt_offset(obj)); 497 } 498 if (work->pending_flip_obj) { 499 struct drm_i915_gem_object *obj = work->pending_flip_obj; 500 if (obj) 501 seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n", 502 i915_gem_obj_ggtt_offset(obj)); 503 } 504 } 505 spin_unlock_irqrestore(&dev->event_lock, flags); 506 } 507 508 return 0; 509 } 510 511 static int i915_gem_request_info(struct seq_file *m, void *data) 512 { 513 struct drm_info_node *node = (struct drm_info_node *) m->private; 514 struct drm_device *dev = node->minor->dev; 515 drm_i915_private_t *dev_priv = dev->dev_private; 516 struct intel_ring_buffer *ring; 517 struct drm_i915_gem_request *gem_request; 518 int ret, count, i; 519 520 ret = mutex_lock_interruptible(&dev->struct_mutex); 521 if (ret) 522 return ret; 523 524 count = 0; 525 for_each_ring(ring, dev_priv, i) { 526 if (list_empty(&ring->request_list)) 527 continue; 528 529 seq_printf(m, "%s requests:\n", ring->name); 530 list_for_each_entry(gem_request, 531 &ring->request_list, 532 list) { 533 seq_printf(m, " %d @ %d\n", 534 gem_request->seqno, 535 (int) (jiffies - gem_request->emitted_jiffies)); 536 } 537 count++; 538 } 539 mutex_unlock(&dev->struct_mutex); 540 541 if (count == 0) 542 seq_puts(m, "No requests\n"); 543 544 return 0; 545 } 546 547 static void i915_ring_seqno_info(struct seq_file *m, 548 struct intel_ring_buffer *ring) 549 { 550 if (ring->get_seqno) { 551 seq_printf(m, "Current sequence (%s): %u\n", 552 ring->name, ring->get_seqno(ring, false)); 553 } 554 } 555 556 static int i915_gem_seqno_info(struct seq_file *m, void *data) 557 { 558 struct drm_info_node *node = (struct drm_info_node *) m->private; 559 struct drm_device *dev = node->minor->dev; 560 drm_i915_private_t *dev_priv = dev->dev_private; 561 struct intel_ring_buffer *ring; 562 int ret, i; 563 564 ret = mutex_lock_interruptible(&dev->struct_mutex); 565 if (ret) 566 return ret; 567 568 for_each_ring(ring, dev_priv, i) 569 i915_ring_seqno_info(m, ring); 570 571 mutex_unlock(&dev->struct_mutex); 572 573 return 0; 574 } 575 576 577 static int i915_interrupt_info(struct seq_file *m, void *data) 578 { 579 struct drm_info_node *node = (struct drm_info_node *) m->private; 580 struct drm_device *dev = node->minor->dev; 581 drm_i915_private_t *dev_priv = dev->dev_private; 582 struct intel_ring_buffer *ring; 583 int ret, i, pipe; 584 585 ret = mutex_lock_interruptible(&dev->struct_mutex); 586 if (ret) 587 return ret; 588 589 if (INTEL_INFO(dev)->gen >= 8) { 590 int i; 591 seq_printf(m, "Master Interrupt Control:\t%08x\n", 592 I915_READ(GEN8_MASTER_IRQ)); 593 594 for (i = 0; i < 4; i++) { 595 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 596 i, I915_READ(GEN8_GT_IMR(i))); 597 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 598 i, I915_READ(GEN8_GT_IIR(i))); 599 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 600 i, I915_READ(GEN8_GT_IER(i))); 601 } 602 603 for_each_pipe(i) { 604 seq_printf(m, "Pipe %c IMR:\t%08x\n", 605 pipe_name(i), 606 I915_READ(GEN8_DE_PIPE_IMR(i))); 607 seq_printf(m, "Pipe %c IIR:\t%08x\n", 608 pipe_name(i), 609 I915_READ(GEN8_DE_PIPE_IIR(i))); 610 seq_printf(m, "Pipe %c IER:\t%08x\n", 611 pipe_name(i), 612 I915_READ(GEN8_DE_PIPE_IER(i))); 613 } 614 615 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 616 I915_READ(GEN8_DE_PORT_IMR)); 617 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 618 I915_READ(GEN8_DE_PORT_IIR)); 619 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 620 I915_READ(GEN8_DE_PORT_IER)); 621 622 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 623 I915_READ(GEN8_DE_MISC_IMR)); 624 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 625 I915_READ(GEN8_DE_MISC_IIR)); 626 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 627 I915_READ(GEN8_DE_MISC_IER)); 628 629 seq_printf(m, "PCU interrupt mask:\t%08x\n", 630 I915_READ(GEN8_PCU_IMR)); 631 seq_printf(m, "PCU interrupt identity:\t%08x\n", 632 I915_READ(GEN8_PCU_IIR)); 633 seq_printf(m, "PCU interrupt enable:\t%08x\n", 634 I915_READ(GEN8_PCU_IER)); 635 } else if (IS_VALLEYVIEW(dev)) { 636 seq_printf(m, "Display IER:\t%08x\n", 637 I915_READ(VLV_IER)); 638 seq_printf(m, "Display IIR:\t%08x\n", 639 I915_READ(VLV_IIR)); 640 seq_printf(m, "Display IIR_RW:\t%08x\n", 641 I915_READ(VLV_IIR_RW)); 642 seq_printf(m, "Display IMR:\t%08x\n", 643 I915_READ(VLV_IMR)); 644 for_each_pipe(pipe) 645 seq_printf(m, "Pipe %c stat:\t%08x\n", 646 pipe_name(pipe), 647 I915_READ(PIPESTAT(pipe))); 648 649 seq_printf(m, "Master IER:\t%08x\n", 650 I915_READ(VLV_MASTER_IER)); 651 652 seq_printf(m, "Render IER:\t%08x\n", 653 I915_READ(GTIER)); 654 seq_printf(m, "Render IIR:\t%08x\n", 655 I915_READ(GTIIR)); 656 seq_printf(m, "Render IMR:\t%08x\n", 657 I915_READ(GTIMR)); 658 659 seq_printf(m, "PM IER:\t\t%08x\n", 660 I915_READ(GEN6_PMIER)); 661 seq_printf(m, "PM IIR:\t\t%08x\n", 662 I915_READ(GEN6_PMIIR)); 663 seq_printf(m, "PM IMR:\t\t%08x\n", 664 I915_READ(GEN6_PMIMR)); 665 666 seq_printf(m, "Port hotplug:\t%08x\n", 667 I915_READ(PORT_HOTPLUG_EN)); 668 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 669 I915_READ(VLV_DPFLIPSTAT)); 670 seq_printf(m, "DPINVGTT:\t%08x\n", 671 I915_READ(DPINVGTT)); 672 673 } else if (!HAS_PCH_SPLIT(dev)) { 674 seq_printf(m, "Interrupt enable: %08x\n", 675 I915_READ(IER)); 676 seq_printf(m, "Interrupt identity: %08x\n", 677 I915_READ(IIR)); 678 seq_printf(m, "Interrupt mask: %08x\n", 679 I915_READ(IMR)); 680 for_each_pipe(pipe) 681 seq_printf(m, "Pipe %c stat: %08x\n", 682 pipe_name(pipe), 683 I915_READ(PIPESTAT(pipe))); 684 } else { 685 seq_printf(m, "North Display Interrupt enable: %08x\n", 686 I915_READ(DEIER)); 687 seq_printf(m, "North Display Interrupt identity: %08x\n", 688 I915_READ(DEIIR)); 689 seq_printf(m, "North Display Interrupt mask: %08x\n", 690 I915_READ(DEIMR)); 691 seq_printf(m, "South Display Interrupt enable: %08x\n", 692 I915_READ(SDEIER)); 693 seq_printf(m, "South Display Interrupt identity: %08x\n", 694 I915_READ(SDEIIR)); 695 seq_printf(m, "South Display Interrupt mask: %08x\n", 696 I915_READ(SDEIMR)); 697 seq_printf(m, "Graphics Interrupt enable: %08x\n", 698 I915_READ(GTIER)); 699 seq_printf(m, "Graphics Interrupt identity: %08x\n", 700 I915_READ(GTIIR)); 701 seq_printf(m, "Graphics Interrupt mask: %08x\n", 702 I915_READ(GTIMR)); 703 } 704 seq_printf(m, "Interrupts received: %d\n", 705 atomic_read(&dev_priv->irq_received)); 706 for_each_ring(ring, dev_priv, i) { 707 if (INTEL_INFO(dev)->gen >= 6) { 708 seq_printf(m, 709 "Graphics Interrupt mask (%s): %08x\n", 710 ring->name, I915_READ_IMR(ring)); 711 } 712 i915_ring_seqno_info(m, ring); 713 } 714 mutex_unlock(&dev->struct_mutex); 715 716 return 0; 717 } 718 719 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 720 { 721 struct drm_info_node *node = (struct drm_info_node *) m->private; 722 struct drm_device *dev = node->minor->dev; 723 drm_i915_private_t *dev_priv = dev->dev_private; 724 int i, ret; 725 726 ret = mutex_lock_interruptible(&dev->struct_mutex); 727 if (ret) 728 return ret; 729 730 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 731 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 732 for (i = 0; i < dev_priv->num_fence_regs; i++) { 733 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 734 735 seq_printf(m, "Fence %d, pin count = %d, object = ", 736 i, dev_priv->fence_regs[i].pin_count); 737 if (obj == NULL) 738 seq_puts(m, "unused"); 739 else 740 describe_obj(m, obj); 741 seq_putc(m, '\n'); 742 } 743 744 mutex_unlock(&dev->struct_mutex); 745 return 0; 746 } 747 748 static int i915_hws_info(struct seq_file *m, void *data) 749 { 750 struct drm_info_node *node = (struct drm_info_node *) m->private; 751 struct drm_device *dev = node->minor->dev; 752 drm_i915_private_t *dev_priv = dev->dev_private; 753 struct intel_ring_buffer *ring; 754 const u32 *hws; 755 int i; 756 757 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 758 hws = ring->status_page.page_addr; 759 if (hws == NULL) 760 return 0; 761 762 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 763 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 764 i * 4, 765 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 766 } 767 return 0; 768 } 769 770 static ssize_t 771 i915_error_state_write(struct file *filp, 772 const char __user *ubuf, 773 size_t cnt, 774 loff_t *ppos) 775 { 776 struct i915_error_state_file_priv *error_priv = filp->private_data; 777 struct drm_device *dev = error_priv->dev; 778 int ret; 779 780 DRM_DEBUG_DRIVER("Resetting error state\n"); 781 782 ret = mutex_lock_interruptible(&dev->struct_mutex); 783 if (ret) 784 return ret; 785 786 i915_destroy_error_state(dev); 787 mutex_unlock(&dev->struct_mutex); 788 789 return cnt; 790 } 791 792 static int i915_error_state_open(struct inode *inode, struct file *file) 793 { 794 struct drm_device *dev = inode->i_private; 795 struct i915_error_state_file_priv *error_priv; 796 797 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); 798 if (!error_priv) 799 return -ENOMEM; 800 801 error_priv->dev = dev; 802 803 i915_error_state_get(dev, error_priv); 804 805 file->private_data = error_priv; 806 807 return 0; 808 } 809 810 static int i915_error_state_release(struct inode *inode, struct file *file) 811 { 812 struct i915_error_state_file_priv *error_priv = file->private_data; 813 814 i915_error_state_put(error_priv); 815 kfree(error_priv); 816 817 return 0; 818 } 819 820 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, 821 size_t count, loff_t *pos) 822 { 823 struct i915_error_state_file_priv *error_priv = file->private_data; 824 struct drm_i915_error_state_buf error_str; 825 loff_t tmp_pos = 0; 826 ssize_t ret_count = 0; 827 int ret; 828 829 ret = i915_error_state_buf_init(&error_str, count, *pos); 830 if (ret) 831 return ret; 832 833 ret = i915_error_state_to_str(&error_str, error_priv); 834 if (ret) 835 goto out; 836 837 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos, 838 error_str.buf, 839 error_str.bytes); 840 841 if (ret_count < 0) 842 ret = ret_count; 843 else 844 *pos = error_str.start + ret_count; 845 out: 846 i915_error_state_buf_release(&error_str); 847 return ret ?: ret_count; 848 } 849 850 static const struct file_operations i915_error_state_fops = { 851 .owner = THIS_MODULE, 852 .open = i915_error_state_open, 853 .read = i915_error_state_read, 854 .write = i915_error_state_write, 855 .llseek = default_llseek, 856 .release = i915_error_state_release, 857 }; 858 859 static int 860 i915_next_seqno_get(void *data, u64 *val) 861 { 862 struct drm_device *dev = data; 863 drm_i915_private_t *dev_priv = dev->dev_private; 864 int ret; 865 866 ret = mutex_lock_interruptible(&dev->struct_mutex); 867 if (ret) 868 return ret; 869 870 *val = dev_priv->next_seqno; 871 mutex_unlock(&dev->struct_mutex); 872 873 return 0; 874 } 875 876 static int 877 i915_next_seqno_set(void *data, u64 val) 878 { 879 struct drm_device *dev = data; 880 int ret; 881 882 ret = mutex_lock_interruptible(&dev->struct_mutex); 883 if (ret) 884 return ret; 885 886 ret = i915_gem_set_seqno(dev, val); 887 mutex_unlock(&dev->struct_mutex); 888 889 return ret; 890 } 891 892 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, 893 i915_next_seqno_get, i915_next_seqno_set, 894 "0x%llx\n"); 895 896 static int i915_rstdby_delays(struct seq_file *m, void *unused) 897 { 898 struct drm_info_node *node = (struct drm_info_node *) m->private; 899 struct drm_device *dev = node->minor->dev; 900 drm_i915_private_t *dev_priv = dev->dev_private; 901 u16 crstanddelay; 902 int ret; 903 904 ret = mutex_lock_interruptible(&dev->struct_mutex); 905 if (ret) 906 return ret; 907 908 crstanddelay = I915_READ16(CRSTANDVID); 909 910 mutex_unlock(&dev->struct_mutex); 911 912 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 913 914 return 0; 915 } 916 917 static int i915_cur_delayinfo(struct seq_file *m, void *unused) 918 { 919 struct drm_info_node *node = (struct drm_info_node *) m->private; 920 struct drm_device *dev = node->minor->dev; 921 drm_i915_private_t *dev_priv = dev->dev_private; 922 int ret; 923 924 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 925 926 if (IS_GEN5(dev)) { 927 u16 rgvswctl = I915_READ16(MEMSWCTL); 928 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 929 930 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 931 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 932 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 933 MEMSTAT_VID_SHIFT); 934 seq_printf(m, "Current P-state: %d\n", 935 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 936 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { 937 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 938 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 939 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 940 u32 rpstat, cagf, reqf; 941 u32 rpupei, rpcurup, rpprevup; 942 u32 rpdownei, rpcurdown, rpprevdown; 943 int max_freq; 944 945 /* RPSTAT1 is in the GT power well */ 946 ret = mutex_lock_interruptible(&dev->struct_mutex); 947 if (ret) 948 return ret; 949 950 gen6_gt_force_wake_get(dev_priv); 951 952 reqf = I915_READ(GEN6_RPNSWREQ); 953 reqf &= ~GEN6_TURBO_DISABLE; 954 if (IS_HASWELL(dev)) 955 reqf >>= 24; 956 else 957 reqf >>= 25; 958 reqf *= GT_FREQUENCY_MULTIPLIER; 959 960 rpstat = I915_READ(GEN6_RPSTAT1); 961 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 962 rpcurup = I915_READ(GEN6_RP_CUR_UP); 963 rpprevup = I915_READ(GEN6_RP_PREV_UP); 964 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 965 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 966 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 967 if (IS_HASWELL(dev)) 968 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 969 else 970 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 971 cagf *= GT_FREQUENCY_MULTIPLIER; 972 973 gen6_gt_force_wake_put(dev_priv); 974 mutex_unlock(&dev->struct_mutex); 975 976 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 977 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 978 seq_printf(m, "Render p-state ratio: %d\n", 979 (gt_perf_status & 0xff00) >> 8); 980 seq_printf(m, "Render p-state VID: %d\n", 981 gt_perf_status & 0xff); 982 seq_printf(m, "Render p-state limit: %d\n", 983 rp_state_limits & 0xff); 984 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 985 seq_printf(m, "CAGF: %dMHz\n", cagf); 986 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 987 GEN6_CURICONT_MASK); 988 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 989 GEN6_CURBSYTAVG_MASK); 990 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 991 GEN6_CURBSYTAVG_MASK); 992 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 993 GEN6_CURIAVG_MASK); 994 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 995 GEN6_CURBSYTAVG_MASK); 996 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 997 GEN6_CURBSYTAVG_MASK); 998 999 max_freq = (rp_state_cap & 0xff0000) >> 16; 1000 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1001 max_freq * GT_FREQUENCY_MULTIPLIER); 1002 1003 max_freq = (rp_state_cap & 0xff00) >> 8; 1004 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1005 max_freq * GT_FREQUENCY_MULTIPLIER); 1006 1007 max_freq = rp_state_cap & 0xff; 1008 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1009 max_freq * GT_FREQUENCY_MULTIPLIER); 1010 1011 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1012 dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER); 1013 } else if (IS_VALLEYVIEW(dev)) { 1014 u32 freq_sts, val; 1015 1016 mutex_lock(&dev_priv->rps.hw_lock); 1017 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 1018 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 1019 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 1020 1021 val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1); 1022 seq_printf(m, "max GPU freq: %d MHz\n", 1023 vlv_gpu_freq(dev_priv->mem_freq, val)); 1024 1025 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM); 1026 seq_printf(m, "min GPU freq: %d MHz\n", 1027 vlv_gpu_freq(dev_priv->mem_freq, val)); 1028 1029 seq_printf(m, "current GPU freq: %d MHz\n", 1030 vlv_gpu_freq(dev_priv->mem_freq, 1031 (freq_sts >> 8) & 0xff)); 1032 mutex_unlock(&dev_priv->rps.hw_lock); 1033 } else { 1034 seq_puts(m, "no P-state info available\n"); 1035 } 1036 1037 return 0; 1038 } 1039 1040 static int i915_delayfreq_table(struct seq_file *m, void *unused) 1041 { 1042 struct drm_info_node *node = (struct drm_info_node *) m->private; 1043 struct drm_device *dev = node->minor->dev; 1044 drm_i915_private_t *dev_priv = dev->dev_private; 1045 u32 delayfreq; 1046 int ret, i; 1047 1048 ret = mutex_lock_interruptible(&dev->struct_mutex); 1049 if (ret) 1050 return ret; 1051 1052 for (i = 0; i < 16; i++) { 1053 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 1054 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 1055 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 1056 } 1057 1058 mutex_unlock(&dev->struct_mutex); 1059 1060 return 0; 1061 } 1062 1063 static inline int MAP_TO_MV(int map) 1064 { 1065 return 1250 - (map * 25); 1066 } 1067 1068 static int i915_inttoext_table(struct seq_file *m, void *unused) 1069 { 1070 struct drm_info_node *node = (struct drm_info_node *) m->private; 1071 struct drm_device *dev = node->minor->dev; 1072 drm_i915_private_t *dev_priv = dev->dev_private; 1073 u32 inttoext; 1074 int ret, i; 1075 1076 ret = mutex_lock_interruptible(&dev->struct_mutex); 1077 if (ret) 1078 return ret; 1079 1080 for (i = 1; i <= 32; i++) { 1081 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 1082 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 1083 } 1084 1085 mutex_unlock(&dev->struct_mutex); 1086 1087 return 0; 1088 } 1089 1090 static int ironlake_drpc_info(struct seq_file *m) 1091 { 1092 struct drm_info_node *node = (struct drm_info_node *) m->private; 1093 struct drm_device *dev = node->minor->dev; 1094 drm_i915_private_t *dev_priv = dev->dev_private; 1095 u32 rgvmodectl, rstdbyctl; 1096 u16 crstandvid; 1097 int ret; 1098 1099 ret = mutex_lock_interruptible(&dev->struct_mutex); 1100 if (ret) 1101 return ret; 1102 1103 rgvmodectl = I915_READ(MEMMODECTL); 1104 rstdbyctl = I915_READ(RSTDBYCTL); 1105 crstandvid = I915_READ16(CRSTANDVID); 1106 1107 mutex_unlock(&dev->struct_mutex); 1108 1109 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1110 "yes" : "no"); 1111 seq_printf(m, "Boost freq: %d\n", 1112 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1113 MEMMODE_BOOST_FREQ_SHIFT); 1114 seq_printf(m, "HW control enabled: %s\n", 1115 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 1116 seq_printf(m, "SW control enabled: %s\n", 1117 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 1118 seq_printf(m, "Gated voltage change: %s\n", 1119 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 1120 seq_printf(m, "Starting frequency: P%d\n", 1121 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1122 seq_printf(m, "Max P-state: P%d\n", 1123 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1124 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1125 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1126 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1127 seq_printf(m, "Render standby enabled: %s\n", 1128 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1129 seq_puts(m, "Current RS state: "); 1130 switch (rstdbyctl & RSX_STATUS_MASK) { 1131 case RSX_STATUS_ON: 1132 seq_puts(m, "on\n"); 1133 break; 1134 case RSX_STATUS_RC1: 1135 seq_puts(m, "RC1\n"); 1136 break; 1137 case RSX_STATUS_RC1E: 1138 seq_puts(m, "RC1E\n"); 1139 break; 1140 case RSX_STATUS_RS1: 1141 seq_puts(m, "RS1\n"); 1142 break; 1143 case RSX_STATUS_RS2: 1144 seq_puts(m, "RS2 (RC6)\n"); 1145 break; 1146 case RSX_STATUS_RS3: 1147 seq_puts(m, "RC3 (RC6+)\n"); 1148 break; 1149 default: 1150 seq_puts(m, "unknown\n"); 1151 break; 1152 } 1153 1154 return 0; 1155 } 1156 1157 static int gen6_drpc_info(struct seq_file *m) 1158 { 1159 1160 struct drm_info_node *node = (struct drm_info_node *) m->private; 1161 struct drm_device *dev = node->minor->dev; 1162 struct drm_i915_private *dev_priv = dev->dev_private; 1163 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1164 unsigned forcewake_count; 1165 int count = 0, ret; 1166 1167 ret = mutex_lock_interruptible(&dev->struct_mutex); 1168 if (ret) 1169 return ret; 1170 1171 spin_lock_irq(&dev_priv->uncore.lock); 1172 forcewake_count = dev_priv->uncore.forcewake_count; 1173 spin_unlock_irq(&dev_priv->uncore.lock); 1174 1175 if (forcewake_count) { 1176 seq_puts(m, "RC information inaccurate because somebody " 1177 "holds a forcewake reference \n"); 1178 } else { 1179 /* NB: we cannot use forcewake, else we read the wrong values */ 1180 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1181 udelay(10); 1182 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1183 } 1184 1185 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1186 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1187 1188 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1189 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1190 mutex_unlock(&dev->struct_mutex); 1191 mutex_lock(&dev_priv->rps.hw_lock); 1192 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1193 mutex_unlock(&dev_priv->rps.hw_lock); 1194 1195 seq_printf(m, "Video Turbo Mode: %s\n", 1196 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1197 seq_printf(m, "HW control enabled: %s\n", 1198 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1199 seq_printf(m, "SW control enabled: %s\n", 1200 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1201 GEN6_RP_MEDIA_SW_MODE)); 1202 seq_printf(m, "RC1e Enabled: %s\n", 1203 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1204 seq_printf(m, "RC6 Enabled: %s\n", 1205 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1206 seq_printf(m, "Deep RC6 Enabled: %s\n", 1207 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1208 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1209 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1210 seq_puts(m, "Current RC state: "); 1211 switch (gt_core_status & GEN6_RCn_MASK) { 1212 case GEN6_RC0: 1213 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1214 seq_puts(m, "Core Power Down\n"); 1215 else 1216 seq_puts(m, "on\n"); 1217 break; 1218 case GEN6_RC3: 1219 seq_puts(m, "RC3\n"); 1220 break; 1221 case GEN6_RC6: 1222 seq_puts(m, "RC6\n"); 1223 break; 1224 case GEN6_RC7: 1225 seq_puts(m, "RC7\n"); 1226 break; 1227 default: 1228 seq_puts(m, "Unknown\n"); 1229 break; 1230 } 1231 1232 seq_printf(m, "Core Power Down: %s\n", 1233 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1234 1235 /* Not exactly sure what this is */ 1236 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1237 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1238 seq_printf(m, "RC6 residency since boot: %u\n", 1239 I915_READ(GEN6_GT_GFX_RC6)); 1240 seq_printf(m, "RC6+ residency since boot: %u\n", 1241 I915_READ(GEN6_GT_GFX_RC6p)); 1242 seq_printf(m, "RC6++ residency since boot: %u\n", 1243 I915_READ(GEN6_GT_GFX_RC6pp)); 1244 1245 seq_printf(m, "RC6 voltage: %dmV\n", 1246 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1247 seq_printf(m, "RC6+ voltage: %dmV\n", 1248 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1249 seq_printf(m, "RC6++ voltage: %dmV\n", 1250 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1251 return 0; 1252 } 1253 1254 static int i915_drpc_info(struct seq_file *m, void *unused) 1255 { 1256 struct drm_info_node *node = (struct drm_info_node *) m->private; 1257 struct drm_device *dev = node->minor->dev; 1258 1259 if (IS_GEN6(dev) || IS_GEN7(dev)) 1260 return gen6_drpc_info(m); 1261 else 1262 return ironlake_drpc_info(m); 1263 } 1264 1265 static int i915_fbc_status(struct seq_file *m, void *unused) 1266 { 1267 struct drm_info_node *node = (struct drm_info_node *) m->private; 1268 struct drm_device *dev = node->minor->dev; 1269 drm_i915_private_t *dev_priv = dev->dev_private; 1270 1271 if (!I915_HAS_FBC(dev)) { 1272 seq_puts(m, "FBC unsupported on this chipset\n"); 1273 return 0; 1274 } 1275 1276 if (intel_fbc_enabled(dev)) { 1277 seq_puts(m, "FBC enabled\n"); 1278 } else { 1279 seq_puts(m, "FBC disabled: "); 1280 switch (dev_priv->fbc.no_fbc_reason) { 1281 case FBC_OK: 1282 seq_puts(m, "FBC actived, but currently disabled in hardware"); 1283 break; 1284 case FBC_UNSUPPORTED: 1285 seq_puts(m, "unsupported by this chipset"); 1286 break; 1287 case FBC_NO_OUTPUT: 1288 seq_puts(m, "no outputs"); 1289 break; 1290 case FBC_STOLEN_TOO_SMALL: 1291 seq_puts(m, "not enough stolen memory"); 1292 break; 1293 case FBC_UNSUPPORTED_MODE: 1294 seq_puts(m, "mode not supported"); 1295 break; 1296 case FBC_MODE_TOO_LARGE: 1297 seq_puts(m, "mode too large"); 1298 break; 1299 case FBC_BAD_PLANE: 1300 seq_puts(m, "FBC unsupported on plane"); 1301 break; 1302 case FBC_NOT_TILED: 1303 seq_puts(m, "scanout buffer not tiled"); 1304 break; 1305 case FBC_MULTIPLE_PIPES: 1306 seq_puts(m, "multiple pipes are enabled"); 1307 break; 1308 case FBC_MODULE_PARAM: 1309 seq_puts(m, "disabled per module param (default off)"); 1310 break; 1311 case FBC_CHIP_DEFAULT: 1312 seq_puts(m, "disabled per chip default"); 1313 break; 1314 default: 1315 seq_puts(m, "unknown reason"); 1316 } 1317 seq_putc(m, '\n'); 1318 } 1319 return 0; 1320 } 1321 1322 static int i915_ips_status(struct seq_file *m, void *unused) 1323 { 1324 struct drm_info_node *node = (struct drm_info_node *) m->private; 1325 struct drm_device *dev = node->minor->dev; 1326 struct drm_i915_private *dev_priv = dev->dev_private; 1327 1328 if (!HAS_IPS(dev)) { 1329 seq_puts(m, "not supported\n"); 1330 return 0; 1331 } 1332 1333 if (I915_READ(IPS_CTL) & IPS_ENABLE) 1334 seq_puts(m, "enabled\n"); 1335 else 1336 seq_puts(m, "disabled\n"); 1337 1338 return 0; 1339 } 1340 1341 static int i915_sr_status(struct seq_file *m, void *unused) 1342 { 1343 struct drm_info_node *node = (struct drm_info_node *) m->private; 1344 struct drm_device *dev = node->minor->dev; 1345 drm_i915_private_t *dev_priv = dev->dev_private; 1346 bool sr_enabled = false; 1347 1348 if (HAS_PCH_SPLIT(dev)) 1349 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1350 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1351 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1352 else if (IS_I915GM(dev)) 1353 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1354 else if (IS_PINEVIEW(dev)) 1355 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1356 1357 seq_printf(m, "self-refresh: %s\n", 1358 sr_enabled ? "enabled" : "disabled"); 1359 1360 return 0; 1361 } 1362 1363 static int i915_emon_status(struct seq_file *m, void *unused) 1364 { 1365 struct drm_info_node *node = (struct drm_info_node *) m->private; 1366 struct drm_device *dev = node->minor->dev; 1367 drm_i915_private_t *dev_priv = dev->dev_private; 1368 unsigned long temp, chipset, gfx; 1369 int ret; 1370 1371 if (!IS_GEN5(dev)) 1372 return -ENODEV; 1373 1374 ret = mutex_lock_interruptible(&dev->struct_mutex); 1375 if (ret) 1376 return ret; 1377 1378 temp = i915_mch_val(dev_priv); 1379 chipset = i915_chipset_val(dev_priv); 1380 gfx = i915_gfx_val(dev_priv); 1381 mutex_unlock(&dev->struct_mutex); 1382 1383 seq_printf(m, "GMCH temp: %ld\n", temp); 1384 seq_printf(m, "Chipset power: %ld\n", chipset); 1385 seq_printf(m, "GFX power: %ld\n", gfx); 1386 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1387 1388 return 0; 1389 } 1390 1391 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1392 { 1393 struct drm_info_node *node = (struct drm_info_node *) m->private; 1394 struct drm_device *dev = node->minor->dev; 1395 drm_i915_private_t *dev_priv = dev->dev_private; 1396 int ret; 1397 int gpu_freq, ia_freq; 1398 1399 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1400 seq_puts(m, "unsupported on this chipset\n"); 1401 return 0; 1402 } 1403 1404 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1405 1406 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1407 if (ret) 1408 return ret; 1409 1410 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1411 1412 for (gpu_freq = dev_priv->rps.min_delay; 1413 gpu_freq <= dev_priv->rps.max_delay; 1414 gpu_freq++) { 1415 ia_freq = gpu_freq; 1416 sandybridge_pcode_read(dev_priv, 1417 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1418 &ia_freq); 1419 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1420 gpu_freq * GT_FREQUENCY_MULTIPLIER, 1421 ((ia_freq >> 0) & 0xff) * 100, 1422 ((ia_freq >> 8) & 0xff) * 100); 1423 } 1424 1425 mutex_unlock(&dev_priv->rps.hw_lock); 1426 1427 return 0; 1428 } 1429 1430 static int i915_gfxec(struct seq_file *m, void *unused) 1431 { 1432 struct drm_info_node *node = (struct drm_info_node *) m->private; 1433 struct drm_device *dev = node->minor->dev; 1434 drm_i915_private_t *dev_priv = dev->dev_private; 1435 int ret; 1436 1437 ret = mutex_lock_interruptible(&dev->struct_mutex); 1438 if (ret) 1439 return ret; 1440 1441 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1442 1443 mutex_unlock(&dev->struct_mutex); 1444 1445 return 0; 1446 } 1447 1448 static int i915_opregion(struct seq_file *m, void *unused) 1449 { 1450 struct drm_info_node *node = (struct drm_info_node *) m->private; 1451 struct drm_device *dev = node->minor->dev; 1452 drm_i915_private_t *dev_priv = dev->dev_private; 1453 struct intel_opregion *opregion = &dev_priv->opregion; 1454 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); 1455 int ret; 1456 1457 if (data == NULL) 1458 return -ENOMEM; 1459 1460 ret = mutex_lock_interruptible(&dev->struct_mutex); 1461 if (ret) 1462 goto out; 1463 1464 if (opregion->header) { 1465 memcpy_fromio(data, opregion->header, OPREGION_SIZE); 1466 seq_write(m, data, OPREGION_SIZE); 1467 } 1468 1469 mutex_unlock(&dev->struct_mutex); 1470 1471 out: 1472 kfree(data); 1473 return 0; 1474 } 1475 1476 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1477 { 1478 struct drm_info_node *node = (struct drm_info_node *) m->private; 1479 struct drm_device *dev = node->minor->dev; 1480 struct intel_fbdev *ifbdev = NULL; 1481 struct intel_framebuffer *fb; 1482 1483 #ifdef CONFIG_DRM_I915_FBDEV 1484 struct drm_i915_private *dev_priv = dev->dev_private; 1485 int ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1486 if (ret) 1487 return ret; 1488 1489 ifbdev = dev_priv->fbdev; 1490 fb = to_intel_framebuffer(ifbdev->helper.fb); 1491 1492 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1493 fb->base.width, 1494 fb->base.height, 1495 fb->base.depth, 1496 fb->base.bits_per_pixel, 1497 atomic_read(&fb->base.refcount.refcount)); 1498 describe_obj(m, fb->obj); 1499 seq_putc(m, '\n'); 1500 mutex_unlock(&dev->mode_config.mutex); 1501 #endif 1502 1503 mutex_lock(&dev->mode_config.fb_lock); 1504 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1505 if (ifbdev && &fb->base == ifbdev->helper.fb) 1506 continue; 1507 1508 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1509 fb->base.width, 1510 fb->base.height, 1511 fb->base.depth, 1512 fb->base.bits_per_pixel, 1513 atomic_read(&fb->base.refcount.refcount)); 1514 describe_obj(m, fb->obj); 1515 seq_putc(m, '\n'); 1516 } 1517 mutex_unlock(&dev->mode_config.fb_lock); 1518 1519 return 0; 1520 } 1521 1522 static int i915_context_status(struct seq_file *m, void *unused) 1523 { 1524 struct drm_info_node *node = (struct drm_info_node *) m->private; 1525 struct drm_device *dev = node->minor->dev; 1526 drm_i915_private_t *dev_priv = dev->dev_private; 1527 struct intel_ring_buffer *ring; 1528 struct i915_hw_context *ctx; 1529 int ret, i; 1530 1531 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1532 if (ret) 1533 return ret; 1534 1535 if (dev_priv->ips.pwrctx) { 1536 seq_puts(m, "power context "); 1537 describe_obj(m, dev_priv->ips.pwrctx); 1538 seq_putc(m, '\n'); 1539 } 1540 1541 if (dev_priv->ips.renderctx) { 1542 seq_puts(m, "render context "); 1543 describe_obj(m, dev_priv->ips.renderctx); 1544 seq_putc(m, '\n'); 1545 } 1546 1547 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1548 seq_puts(m, "HW context "); 1549 describe_ctx(m, ctx); 1550 for_each_ring(ring, dev_priv, i) 1551 if (ring->default_context == ctx) 1552 seq_printf(m, "(default context %s) ", ring->name); 1553 1554 describe_obj(m, ctx->obj); 1555 seq_putc(m, '\n'); 1556 } 1557 1558 mutex_unlock(&dev->mode_config.mutex); 1559 1560 return 0; 1561 } 1562 1563 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) 1564 { 1565 struct drm_info_node *node = (struct drm_info_node *) m->private; 1566 struct drm_device *dev = node->minor->dev; 1567 struct drm_i915_private *dev_priv = dev->dev_private; 1568 unsigned forcewake_count; 1569 1570 spin_lock_irq(&dev_priv->uncore.lock); 1571 forcewake_count = dev_priv->uncore.forcewake_count; 1572 spin_unlock_irq(&dev_priv->uncore.lock); 1573 1574 seq_printf(m, "forcewake count = %u\n", forcewake_count); 1575 1576 return 0; 1577 } 1578 1579 static const char *swizzle_string(unsigned swizzle) 1580 { 1581 switch (swizzle) { 1582 case I915_BIT_6_SWIZZLE_NONE: 1583 return "none"; 1584 case I915_BIT_6_SWIZZLE_9: 1585 return "bit9"; 1586 case I915_BIT_6_SWIZZLE_9_10: 1587 return "bit9/bit10"; 1588 case I915_BIT_6_SWIZZLE_9_11: 1589 return "bit9/bit11"; 1590 case I915_BIT_6_SWIZZLE_9_10_11: 1591 return "bit9/bit10/bit11"; 1592 case I915_BIT_6_SWIZZLE_9_17: 1593 return "bit9/bit17"; 1594 case I915_BIT_6_SWIZZLE_9_10_17: 1595 return "bit9/bit10/bit17"; 1596 case I915_BIT_6_SWIZZLE_UNKNOWN: 1597 return "unknown"; 1598 } 1599 1600 return "bug"; 1601 } 1602 1603 static int i915_swizzle_info(struct seq_file *m, void *data) 1604 { 1605 struct drm_info_node *node = (struct drm_info_node *) m->private; 1606 struct drm_device *dev = node->minor->dev; 1607 struct drm_i915_private *dev_priv = dev->dev_private; 1608 int ret; 1609 1610 ret = mutex_lock_interruptible(&dev->struct_mutex); 1611 if (ret) 1612 return ret; 1613 1614 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1615 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1616 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1617 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1618 1619 if (IS_GEN3(dev) || IS_GEN4(dev)) { 1620 seq_printf(m, "DDC = 0x%08x\n", 1621 I915_READ(DCC)); 1622 seq_printf(m, "C0DRB3 = 0x%04x\n", 1623 I915_READ16(C0DRB3)); 1624 seq_printf(m, "C1DRB3 = 0x%04x\n", 1625 I915_READ16(C1DRB3)); 1626 } else if (INTEL_INFO(dev)->gen >= 6) { 1627 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1628 I915_READ(MAD_DIMM_C0)); 1629 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1630 I915_READ(MAD_DIMM_C1)); 1631 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1632 I915_READ(MAD_DIMM_C2)); 1633 seq_printf(m, "TILECTL = 0x%08x\n", 1634 I915_READ(TILECTL)); 1635 if (IS_GEN8(dev)) 1636 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 1637 I915_READ(GAMTARBMODE)); 1638 else 1639 seq_printf(m, "ARB_MODE = 0x%08x\n", 1640 I915_READ(ARB_MODE)); 1641 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1642 I915_READ(DISP_ARB_CTL)); 1643 } 1644 mutex_unlock(&dev->struct_mutex); 1645 1646 return 0; 1647 } 1648 1649 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) 1650 { 1651 struct drm_i915_private *dev_priv = dev->dev_private; 1652 struct intel_ring_buffer *ring; 1653 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1654 int unused, i; 1655 1656 if (!ppgtt) 1657 return; 1658 1659 seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages); 1660 seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages); 1661 for_each_ring(ring, dev_priv, unused) { 1662 seq_printf(m, "%s\n", ring->name); 1663 for (i = 0; i < 4; i++) { 1664 u32 offset = 0x270 + i * 8; 1665 u64 pdp = I915_READ(ring->mmio_base + offset + 4); 1666 pdp <<= 32; 1667 pdp |= I915_READ(ring->mmio_base + offset); 1668 for (i = 0; i < 4; i++) 1669 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 1670 } 1671 } 1672 } 1673 1674 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) 1675 { 1676 struct drm_i915_private *dev_priv = dev->dev_private; 1677 struct intel_ring_buffer *ring; 1678 int i; 1679 1680 if (INTEL_INFO(dev)->gen == 6) 1681 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 1682 1683 for_each_ring(ring, dev_priv, i) { 1684 seq_printf(m, "%s\n", ring->name); 1685 if (INTEL_INFO(dev)->gen == 7) 1686 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 1687 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 1688 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 1689 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 1690 } 1691 if (dev_priv->mm.aliasing_ppgtt) { 1692 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1693 1694 seq_puts(m, "aliasing PPGTT:\n"); 1695 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1696 } 1697 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1698 } 1699 1700 static int i915_ppgtt_info(struct seq_file *m, void *data) 1701 { 1702 struct drm_info_node *node = (struct drm_info_node *) m->private; 1703 struct drm_device *dev = node->minor->dev; 1704 1705 int ret = mutex_lock_interruptible(&dev->struct_mutex); 1706 if (ret) 1707 return ret; 1708 1709 if (INTEL_INFO(dev)->gen >= 8) 1710 gen8_ppgtt_info(m, dev); 1711 else if (INTEL_INFO(dev)->gen >= 6) 1712 gen6_ppgtt_info(m, dev); 1713 1714 mutex_unlock(&dev->struct_mutex); 1715 1716 return 0; 1717 } 1718 1719 static int i915_dpio_info(struct seq_file *m, void *data) 1720 { 1721 struct drm_info_node *node = (struct drm_info_node *) m->private; 1722 struct drm_device *dev = node->minor->dev; 1723 struct drm_i915_private *dev_priv = dev->dev_private; 1724 int ret; 1725 1726 1727 if (!IS_VALLEYVIEW(dev)) { 1728 seq_puts(m, "unsupported\n"); 1729 return 0; 1730 } 1731 1732 ret = mutex_lock_interruptible(&dev_priv->dpio_lock); 1733 if (ret) 1734 return ret; 1735 1736 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); 1737 1738 seq_printf(m, "DPIO_DIV_A: 0x%08x\n", 1739 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A)); 1740 seq_printf(m, "DPIO_DIV_B: 0x%08x\n", 1741 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B)); 1742 1743 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", 1744 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A)); 1745 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", 1746 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B)); 1747 1748 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", 1749 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A)); 1750 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1751 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B)); 1752 1753 seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n", 1754 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A)); 1755 seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n", 1756 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B)); 1757 1758 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1759 vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE)); 1760 1761 mutex_unlock(&dev_priv->dpio_lock); 1762 1763 return 0; 1764 } 1765 1766 static int i915_llc(struct seq_file *m, void *data) 1767 { 1768 struct drm_info_node *node = (struct drm_info_node *) m->private; 1769 struct drm_device *dev = node->minor->dev; 1770 struct drm_i915_private *dev_priv = dev->dev_private; 1771 1772 /* Size calculation for LLC is a bit of a pain. Ignore for now. */ 1773 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); 1774 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); 1775 1776 return 0; 1777 } 1778 1779 static int i915_edp_psr_status(struct seq_file *m, void *data) 1780 { 1781 struct drm_info_node *node = m->private; 1782 struct drm_device *dev = node->minor->dev; 1783 struct drm_i915_private *dev_priv = dev->dev_private; 1784 u32 psrperf = 0; 1785 bool enabled = false; 1786 1787 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 1788 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 1789 1790 enabled = HAS_PSR(dev) && 1791 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; 1792 seq_printf(m, "Enabled: %s\n", yesno(enabled)); 1793 1794 if (HAS_PSR(dev)) 1795 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & 1796 EDP_PSR_PERF_CNT_MASK; 1797 seq_printf(m, "Performance_Counter: %u\n", psrperf); 1798 1799 return 0; 1800 } 1801 1802 static int i915_energy_uJ(struct seq_file *m, void *data) 1803 { 1804 struct drm_info_node *node = m->private; 1805 struct drm_device *dev = node->minor->dev; 1806 struct drm_i915_private *dev_priv = dev->dev_private; 1807 u64 power; 1808 u32 units; 1809 1810 if (INTEL_INFO(dev)->gen < 6) 1811 return -ENODEV; 1812 1813 rdmsrl(MSR_RAPL_POWER_UNIT, power); 1814 power = (power & 0x1f00) >> 8; 1815 units = 1000000 / (1 << power); /* convert to uJ */ 1816 power = I915_READ(MCH_SECP_NRG_STTS); 1817 power *= units; 1818 1819 seq_printf(m, "%llu", (long long unsigned)power); 1820 1821 return 0; 1822 } 1823 1824 static int i915_pc8_status(struct seq_file *m, void *unused) 1825 { 1826 struct drm_info_node *node = (struct drm_info_node *) m->private; 1827 struct drm_device *dev = node->minor->dev; 1828 struct drm_i915_private *dev_priv = dev->dev_private; 1829 1830 if (!IS_HASWELL(dev)) { 1831 seq_puts(m, "not supported\n"); 1832 return 0; 1833 } 1834 1835 mutex_lock(&dev_priv->pc8.lock); 1836 seq_printf(m, "Requirements met: %s\n", 1837 yesno(dev_priv->pc8.requirements_met)); 1838 seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle)); 1839 seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count); 1840 seq_printf(m, "IRQs disabled: %s\n", 1841 yesno(dev_priv->pc8.irqs_disabled)); 1842 seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled)); 1843 mutex_unlock(&dev_priv->pc8.lock); 1844 1845 return 0; 1846 } 1847 1848 struct pipe_crc_info { 1849 const char *name; 1850 struct drm_device *dev; 1851 enum pipe pipe; 1852 }; 1853 1854 static int i915_pipe_crc_open(struct inode *inode, struct file *filep) 1855 { 1856 struct pipe_crc_info *info = inode->i_private; 1857 struct drm_i915_private *dev_priv = info->dev->dev_private; 1858 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 1859 1860 spin_lock_irq(&pipe_crc->lock); 1861 1862 if (pipe_crc->opened) { 1863 spin_unlock_irq(&pipe_crc->lock); 1864 return -EBUSY; /* already open */ 1865 } 1866 1867 pipe_crc->opened = true; 1868 filep->private_data = inode->i_private; 1869 1870 spin_unlock_irq(&pipe_crc->lock); 1871 1872 return 0; 1873 } 1874 1875 static int i915_pipe_crc_release(struct inode *inode, struct file *filep) 1876 { 1877 struct pipe_crc_info *info = inode->i_private; 1878 struct drm_i915_private *dev_priv = info->dev->dev_private; 1879 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 1880 1881 spin_lock_irq(&pipe_crc->lock); 1882 pipe_crc->opened = false; 1883 spin_unlock_irq(&pipe_crc->lock); 1884 1885 return 0; 1886 } 1887 1888 /* (6 fields, 8 chars each, space separated (5) + '\n') */ 1889 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) 1890 /* account for \'0' */ 1891 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) 1892 1893 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) 1894 { 1895 assert_spin_locked(&pipe_crc->lock); 1896 return CIRC_CNT(pipe_crc->head, pipe_crc->tail, 1897 INTEL_PIPE_CRC_ENTRIES_NR); 1898 } 1899 1900 static ssize_t 1901 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, 1902 loff_t *pos) 1903 { 1904 struct pipe_crc_info *info = filep->private_data; 1905 struct drm_device *dev = info->dev; 1906 struct drm_i915_private *dev_priv = dev->dev_private; 1907 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 1908 char buf[PIPE_CRC_BUFFER_LEN]; 1909 int head, tail, n_entries, n; 1910 ssize_t bytes_read; 1911 1912 /* 1913 * Don't allow user space to provide buffers not big enough to hold 1914 * a line of data. 1915 */ 1916 if (count < PIPE_CRC_LINE_LEN) 1917 return -EINVAL; 1918 1919 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) 1920 return 0; 1921 1922 /* nothing to read */ 1923 spin_lock_irq(&pipe_crc->lock); 1924 while (pipe_crc_data_count(pipe_crc) == 0) { 1925 int ret; 1926 1927 if (filep->f_flags & O_NONBLOCK) { 1928 spin_unlock_irq(&pipe_crc->lock); 1929 return -EAGAIN; 1930 } 1931 1932 ret = wait_event_interruptible_lock_irq(pipe_crc->wq, 1933 pipe_crc_data_count(pipe_crc), pipe_crc->lock); 1934 if (ret) { 1935 spin_unlock_irq(&pipe_crc->lock); 1936 return ret; 1937 } 1938 } 1939 1940 /* We now have one or more entries to read */ 1941 head = pipe_crc->head; 1942 tail = pipe_crc->tail; 1943 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR), 1944 count / PIPE_CRC_LINE_LEN); 1945 spin_unlock_irq(&pipe_crc->lock); 1946 1947 bytes_read = 0; 1948 n = 0; 1949 do { 1950 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail]; 1951 int ret; 1952 1953 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, 1954 "%8u %8x %8x %8x %8x %8x\n", 1955 entry->frame, entry->crc[0], 1956 entry->crc[1], entry->crc[2], 1957 entry->crc[3], entry->crc[4]); 1958 1959 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN, 1960 buf, PIPE_CRC_LINE_LEN); 1961 if (ret == PIPE_CRC_LINE_LEN) 1962 return -EFAULT; 1963 1964 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); 1965 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1966 n++; 1967 } while (--n_entries); 1968 1969 spin_lock_irq(&pipe_crc->lock); 1970 pipe_crc->tail = tail; 1971 spin_unlock_irq(&pipe_crc->lock); 1972 1973 return bytes_read; 1974 } 1975 1976 static const struct file_operations i915_pipe_crc_fops = { 1977 .owner = THIS_MODULE, 1978 .open = i915_pipe_crc_open, 1979 .read = i915_pipe_crc_read, 1980 .release = i915_pipe_crc_release, 1981 }; 1982 1983 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { 1984 { 1985 .name = "i915_pipe_A_crc", 1986 .pipe = PIPE_A, 1987 }, 1988 { 1989 .name = "i915_pipe_B_crc", 1990 .pipe = PIPE_B, 1991 }, 1992 { 1993 .name = "i915_pipe_C_crc", 1994 .pipe = PIPE_C, 1995 }, 1996 }; 1997 1998 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, 1999 enum pipe pipe) 2000 { 2001 struct drm_device *dev = minor->dev; 2002 struct dentry *ent; 2003 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; 2004 2005 info->dev = dev; 2006 ent = debugfs_create_file(info->name, S_IRUGO, root, info, 2007 &i915_pipe_crc_fops); 2008 if (IS_ERR(ent)) 2009 return PTR_ERR(ent); 2010 2011 return drm_add_fake_info_node(minor, ent, info); 2012 } 2013 2014 static const char * const pipe_crc_sources[] = { 2015 "none", 2016 "plane1", 2017 "plane2", 2018 "pf", 2019 "pipe", 2020 "TV", 2021 "DP-B", 2022 "DP-C", 2023 "DP-D", 2024 "auto", 2025 }; 2026 2027 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) 2028 { 2029 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); 2030 return pipe_crc_sources[source]; 2031 } 2032 2033 static int display_crc_ctl_show(struct seq_file *m, void *data) 2034 { 2035 struct drm_device *dev = m->private; 2036 struct drm_i915_private *dev_priv = dev->dev_private; 2037 int i; 2038 2039 for (i = 0; i < I915_MAX_PIPES; i++) 2040 seq_printf(m, "%c %s\n", pipe_name(i), 2041 pipe_crc_source_name(dev_priv->pipe_crc[i].source)); 2042 2043 return 0; 2044 } 2045 2046 static int display_crc_ctl_open(struct inode *inode, struct file *file) 2047 { 2048 struct drm_device *dev = inode->i_private; 2049 2050 return single_open(file, display_crc_ctl_show, dev); 2051 } 2052 2053 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2054 uint32_t *val) 2055 { 2056 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2057 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2058 2059 switch (*source) { 2060 case INTEL_PIPE_CRC_SOURCE_PIPE: 2061 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; 2062 break; 2063 case INTEL_PIPE_CRC_SOURCE_NONE: 2064 *val = 0; 2065 break; 2066 default: 2067 return -EINVAL; 2068 } 2069 2070 return 0; 2071 } 2072 2073 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, 2074 enum intel_pipe_crc_source *source) 2075 { 2076 struct intel_encoder *encoder; 2077 struct intel_crtc *crtc; 2078 struct intel_digital_port *dig_port; 2079 int ret = 0; 2080 2081 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2082 2083 mutex_lock(&dev->mode_config.mutex); 2084 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 2085 base.head) { 2086 if (!encoder->base.crtc) 2087 continue; 2088 2089 crtc = to_intel_crtc(encoder->base.crtc); 2090 2091 if (crtc->pipe != pipe) 2092 continue; 2093 2094 switch (encoder->type) { 2095 case INTEL_OUTPUT_TVOUT: 2096 *source = INTEL_PIPE_CRC_SOURCE_TV; 2097 break; 2098 case INTEL_OUTPUT_DISPLAYPORT: 2099 case INTEL_OUTPUT_EDP: 2100 dig_port = enc_to_dig_port(&encoder->base); 2101 switch (dig_port->port) { 2102 case PORT_B: 2103 *source = INTEL_PIPE_CRC_SOURCE_DP_B; 2104 break; 2105 case PORT_C: 2106 *source = INTEL_PIPE_CRC_SOURCE_DP_C; 2107 break; 2108 case PORT_D: 2109 *source = INTEL_PIPE_CRC_SOURCE_DP_D; 2110 break; 2111 default: 2112 WARN(1, "nonexisting DP port %c\n", 2113 port_name(dig_port->port)); 2114 break; 2115 } 2116 break; 2117 } 2118 } 2119 mutex_unlock(&dev->mode_config.mutex); 2120 2121 return ret; 2122 } 2123 2124 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, 2125 enum pipe pipe, 2126 enum intel_pipe_crc_source *source, 2127 uint32_t *val) 2128 { 2129 struct drm_i915_private *dev_priv = dev->dev_private; 2130 bool need_stable_symbols = false; 2131 2132 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 2133 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 2134 if (ret) 2135 return ret; 2136 } 2137 2138 switch (*source) { 2139 case INTEL_PIPE_CRC_SOURCE_PIPE: 2140 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; 2141 break; 2142 case INTEL_PIPE_CRC_SOURCE_DP_B: 2143 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; 2144 need_stable_symbols = true; 2145 break; 2146 case INTEL_PIPE_CRC_SOURCE_DP_C: 2147 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; 2148 need_stable_symbols = true; 2149 break; 2150 case INTEL_PIPE_CRC_SOURCE_NONE: 2151 *val = 0; 2152 break; 2153 default: 2154 return -EINVAL; 2155 } 2156 2157 /* 2158 * When the pipe CRC tap point is after the transcoders we need 2159 * to tweak symbol-level features to produce a deterministic series of 2160 * symbols for a given frame. We need to reset those features only once 2161 * a frame (instead of every nth symbol): 2162 * - DC-balance: used to ensure a better clock recovery from the data 2163 * link (SDVO) 2164 * - DisplayPort scrambling: used for EMI reduction 2165 */ 2166 if (need_stable_symbols) { 2167 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2168 2169 WARN_ON(!IS_G4X(dev)); 2170 2171 tmp |= DC_BALANCE_RESET_VLV; 2172 if (pipe == PIPE_A) 2173 tmp |= PIPE_A_SCRAMBLE_RESET; 2174 else 2175 tmp |= PIPE_B_SCRAMBLE_RESET; 2176 2177 I915_WRITE(PORT_DFT2_G4X, tmp); 2178 } 2179 2180 return 0; 2181 } 2182 2183 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, 2184 enum pipe pipe, 2185 enum intel_pipe_crc_source *source, 2186 uint32_t *val) 2187 { 2188 struct drm_i915_private *dev_priv = dev->dev_private; 2189 bool need_stable_symbols = false; 2190 2191 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 2192 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 2193 if (ret) 2194 return ret; 2195 } 2196 2197 switch (*source) { 2198 case INTEL_PIPE_CRC_SOURCE_PIPE: 2199 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; 2200 break; 2201 case INTEL_PIPE_CRC_SOURCE_TV: 2202 if (!SUPPORTS_TV(dev)) 2203 return -EINVAL; 2204 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; 2205 break; 2206 case INTEL_PIPE_CRC_SOURCE_DP_B: 2207 if (!IS_G4X(dev)) 2208 return -EINVAL; 2209 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; 2210 need_stable_symbols = true; 2211 break; 2212 case INTEL_PIPE_CRC_SOURCE_DP_C: 2213 if (!IS_G4X(dev)) 2214 return -EINVAL; 2215 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; 2216 need_stable_symbols = true; 2217 break; 2218 case INTEL_PIPE_CRC_SOURCE_DP_D: 2219 if (!IS_G4X(dev)) 2220 return -EINVAL; 2221 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; 2222 need_stable_symbols = true; 2223 break; 2224 case INTEL_PIPE_CRC_SOURCE_NONE: 2225 *val = 0; 2226 break; 2227 default: 2228 return -EINVAL; 2229 } 2230 2231 /* 2232 * When the pipe CRC tap point is after the transcoders we need 2233 * to tweak symbol-level features to produce a deterministic series of 2234 * symbols for a given frame. We need to reset those features only once 2235 * a frame (instead of every nth symbol): 2236 * - DC-balance: used to ensure a better clock recovery from the data 2237 * link (SDVO) 2238 * - DisplayPort scrambling: used for EMI reduction 2239 */ 2240 if (need_stable_symbols) { 2241 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2242 2243 WARN_ON(!IS_G4X(dev)); 2244 2245 I915_WRITE(PORT_DFT_I9XX, 2246 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); 2247 2248 if (pipe == PIPE_A) 2249 tmp |= PIPE_A_SCRAMBLE_RESET; 2250 else 2251 tmp |= PIPE_B_SCRAMBLE_RESET; 2252 2253 I915_WRITE(PORT_DFT2_G4X, tmp); 2254 } 2255 2256 return 0; 2257 } 2258 2259 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, 2260 enum pipe pipe) 2261 { 2262 struct drm_i915_private *dev_priv = dev->dev_private; 2263 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2264 2265 if (pipe == PIPE_A) 2266 tmp &= ~PIPE_A_SCRAMBLE_RESET; 2267 else 2268 tmp &= ~PIPE_B_SCRAMBLE_RESET; 2269 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) 2270 tmp &= ~DC_BALANCE_RESET_VLV; 2271 I915_WRITE(PORT_DFT2_G4X, tmp); 2272 2273 } 2274 2275 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, 2276 enum pipe pipe) 2277 { 2278 struct drm_i915_private *dev_priv = dev->dev_private; 2279 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2280 2281 if (pipe == PIPE_A) 2282 tmp &= ~PIPE_A_SCRAMBLE_RESET; 2283 else 2284 tmp &= ~PIPE_B_SCRAMBLE_RESET; 2285 I915_WRITE(PORT_DFT2_G4X, tmp); 2286 2287 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { 2288 I915_WRITE(PORT_DFT_I9XX, 2289 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); 2290 } 2291 } 2292 2293 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2294 uint32_t *val) 2295 { 2296 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2297 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2298 2299 switch (*source) { 2300 case INTEL_PIPE_CRC_SOURCE_PLANE1: 2301 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; 2302 break; 2303 case INTEL_PIPE_CRC_SOURCE_PLANE2: 2304 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; 2305 break; 2306 case INTEL_PIPE_CRC_SOURCE_PIPE: 2307 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; 2308 break; 2309 case INTEL_PIPE_CRC_SOURCE_NONE: 2310 *val = 0; 2311 break; 2312 default: 2313 return -EINVAL; 2314 } 2315 2316 return 0; 2317 } 2318 2319 static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2320 uint32_t *val) 2321 { 2322 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2323 *source = INTEL_PIPE_CRC_SOURCE_PF; 2324 2325 switch (*source) { 2326 case INTEL_PIPE_CRC_SOURCE_PLANE1: 2327 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; 2328 break; 2329 case INTEL_PIPE_CRC_SOURCE_PLANE2: 2330 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; 2331 break; 2332 case INTEL_PIPE_CRC_SOURCE_PF: 2333 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; 2334 break; 2335 case INTEL_PIPE_CRC_SOURCE_NONE: 2336 *val = 0; 2337 break; 2338 default: 2339 return -EINVAL; 2340 } 2341 2342 return 0; 2343 } 2344 2345 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, 2346 enum intel_pipe_crc_source source) 2347 { 2348 struct drm_i915_private *dev_priv = dev->dev_private; 2349 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 2350 u32 val; 2351 int ret; 2352 2353 if (pipe_crc->source == source) 2354 return 0; 2355 2356 /* forbid changing the source without going back to 'none' */ 2357 if (pipe_crc->source && source) 2358 return -EINVAL; 2359 2360 if (IS_GEN2(dev)) 2361 ret = i8xx_pipe_crc_ctl_reg(&source, &val); 2362 else if (INTEL_INFO(dev)->gen < 5) 2363 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); 2364 else if (IS_VALLEYVIEW(dev)) 2365 ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val); 2366 else if (IS_GEN5(dev) || IS_GEN6(dev)) 2367 ret = ilk_pipe_crc_ctl_reg(&source, &val); 2368 else 2369 ret = ivb_pipe_crc_ctl_reg(&source, &val); 2370 2371 if (ret != 0) 2372 return ret; 2373 2374 /* none -> real source transition */ 2375 if (source) { 2376 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", 2377 pipe_name(pipe), pipe_crc_source_name(source)); 2378 2379 pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) * 2380 INTEL_PIPE_CRC_ENTRIES_NR, 2381 GFP_KERNEL); 2382 if (!pipe_crc->entries) 2383 return -ENOMEM; 2384 2385 spin_lock_irq(&pipe_crc->lock); 2386 pipe_crc->head = 0; 2387 pipe_crc->tail = 0; 2388 spin_unlock_irq(&pipe_crc->lock); 2389 } 2390 2391 pipe_crc->source = source; 2392 2393 I915_WRITE(PIPE_CRC_CTL(pipe), val); 2394 POSTING_READ(PIPE_CRC_CTL(pipe)); 2395 2396 /* real source -> none transition */ 2397 if (source == INTEL_PIPE_CRC_SOURCE_NONE) { 2398 struct intel_pipe_crc_entry *entries; 2399 2400 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", 2401 pipe_name(pipe)); 2402 2403 intel_wait_for_vblank(dev, pipe); 2404 2405 spin_lock_irq(&pipe_crc->lock); 2406 entries = pipe_crc->entries; 2407 pipe_crc->entries = NULL; 2408 spin_unlock_irq(&pipe_crc->lock); 2409 2410 kfree(entries); 2411 2412 if (IS_G4X(dev)) 2413 g4x_undo_pipe_scramble_reset(dev, pipe); 2414 else if (IS_VALLEYVIEW(dev)) 2415 vlv_undo_pipe_scramble_reset(dev, pipe); 2416 } 2417 2418 return 0; 2419 } 2420 2421 /* 2422 * Parse pipe CRC command strings: 2423 * command: wsp* object wsp+ name wsp+ source wsp* 2424 * object: 'pipe' 2425 * name: (A | B | C) 2426 * source: (none | plane1 | plane2 | pf) 2427 * wsp: (#0x20 | #0x9 | #0xA)+ 2428 * 2429 * eg.: 2430 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A 2431 * "pipe A none" -> Stop CRC 2432 */ 2433 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) 2434 { 2435 int n_words = 0; 2436 2437 while (*buf) { 2438 char *end; 2439 2440 /* skip leading white space */ 2441 buf = skip_spaces(buf); 2442 if (!*buf) 2443 break; /* end of buffer */ 2444 2445 /* find end of word */ 2446 for (end = buf; *end && !isspace(*end); end++) 2447 ; 2448 2449 if (n_words == max_words) { 2450 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", 2451 max_words); 2452 return -EINVAL; /* ran out of words[] before bytes */ 2453 } 2454 2455 if (*end) 2456 *end++ = '\0'; 2457 words[n_words++] = buf; 2458 buf = end; 2459 } 2460 2461 return n_words; 2462 } 2463 2464 enum intel_pipe_crc_object { 2465 PIPE_CRC_OBJECT_PIPE, 2466 }; 2467 2468 static const char * const pipe_crc_objects[] = { 2469 "pipe", 2470 }; 2471 2472 static int 2473 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) 2474 { 2475 int i; 2476 2477 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) 2478 if (!strcmp(buf, pipe_crc_objects[i])) { 2479 *o = i; 2480 return 0; 2481 } 2482 2483 return -EINVAL; 2484 } 2485 2486 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) 2487 { 2488 const char name = buf[0]; 2489 2490 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) 2491 return -EINVAL; 2492 2493 *pipe = name - 'A'; 2494 2495 return 0; 2496 } 2497 2498 static int 2499 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) 2500 { 2501 int i; 2502 2503 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) 2504 if (!strcmp(buf, pipe_crc_sources[i])) { 2505 *s = i; 2506 return 0; 2507 } 2508 2509 return -EINVAL; 2510 } 2511 2512 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len) 2513 { 2514 #define N_WORDS 3 2515 int n_words; 2516 char *words[N_WORDS]; 2517 enum pipe pipe; 2518 enum intel_pipe_crc_object object; 2519 enum intel_pipe_crc_source source; 2520 2521 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); 2522 if (n_words != N_WORDS) { 2523 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", 2524 N_WORDS); 2525 return -EINVAL; 2526 } 2527 2528 if (display_crc_ctl_parse_object(words[0], &object) < 0) { 2529 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); 2530 return -EINVAL; 2531 } 2532 2533 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { 2534 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); 2535 return -EINVAL; 2536 } 2537 2538 if (display_crc_ctl_parse_source(words[2], &source) < 0) { 2539 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); 2540 return -EINVAL; 2541 } 2542 2543 return pipe_crc_set_source(dev, pipe, source); 2544 } 2545 2546 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, 2547 size_t len, loff_t *offp) 2548 { 2549 struct seq_file *m = file->private_data; 2550 struct drm_device *dev = m->private; 2551 char *tmpbuf; 2552 int ret; 2553 2554 if (len == 0) 2555 return 0; 2556 2557 if (len > PAGE_SIZE - 1) { 2558 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", 2559 PAGE_SIZE); 2560 return -E2BIG; 2561 } 2562 2563 tmpbuf = kmalloc(len + 1, GFP_KERNEL); 2564 if (!tmpbuf) 2565 return -ENOMEM; 2566 2567 if (copy_from_user(tmpbuf, ubuf, len)) { 2568 ret = -EFAULT; 2569 goto out; 2570 } 2571 tmpbuf[len] = '\0'; 2572 2573 ret = display_crc_ctl_parse(dev, tmpbuf, len); 2574 2575 out: 2576 kfree(tmpbuf); 2577 if (ret < 0) 2578 return ret; 2579 2580 *offp += len; 2581 return len; 2582 } 2583 2584 static const struct file_operations i915_display_crc_ctl_fops = { 2585 .owner = THIS_MODULE, 2586 .open = display_crc_ctl_open, 2587 .read = seq_read, 2588 .llseek = seq_lseek, 2589 .release = single_release, 2590 .write = display_crc_ctl_write 2591 }; 2592 2593 static int 2594 i915_wedged_get(void *data, u64 *val) 2595 { 2596 struct drm_device *dev = data; 2597 drm_i915_private_t *dev_priv = dev->dev_private; 2598 2599 *val = atomic_read(&dev_priv->gpu_error.reset_counter); 2600 2601 return 0; 2602 } 2603 2604 static int 2605 i915_wedged_set(void *data, u64 val) 2606 { 2607 struct drm_device *dev = data; 2608 2609 DRM_INFO("Manually setting wedged to %llu\n", val); 2610 i915_handle_error(dev, val); 2611 2612 return 0; 2613 } 2614 2615 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 2616 i915_wedged_get, i915_wedged_set, 2617 "%llu\n"); 2618 2619 static int 2620 i915_ring_stop_get(void *data, u64 *val) 2621 { 2622 struct drm_device *dev = data; 2623 drm_i915_private_t *dev_priv = dev->dev_private; 2624 2625 *val = dev_priv->gpu_error.stop_rings; 2626 2627 return 0; 2628 } 2629 2630 static int 2631 i915_ring_stop_set(void *data, u64 val) 2632 { 2633 struct drm_device *dev = data; 2634 struct drm_i915_private *dev_priv = dev->dev_private; 2635 int ret; 2636 2637 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 2638 2639 ret = mutex_lock_interruptible(&dev->struct_mutex); 2640 if (ret) 2641 return ret; 2642 2643 dev_priv->gpu_error.stop_rings = val; 2644 mutex_unlock(&dev->struct_mutex); 2645 2646 return 0; 2647 } 2648 2649 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 2650 i915_ring_stop_get, i915_ring_stop_set, 2651 "0x%08llx\n"); 2652 2653 static int 2654 i915_ring_missed_irq_get(void *data, u64 *val) 2655 { 2656 struct drm_device *dev = data; 2657 struct drm_i915_private *dev_priv = dev->dev_private; 2658 2659 *val = dev_priv->gpu_error.missed_irq_rings; 2660 return 0; 2661 } 2662 2663 static int 2664 i915_ring_missed_irq_set(void *data, u64 val) 2665 { 2666 struct drm_device *dev = data; 2667 struct drm_i915_private *dev_priv = dev->dev_private; 2668 int ret; 2669 2670 /* Lock against concurrent debugfs callers */ 2671 ret = mutex_lock_interruptible(&dev->struct_mutex); 2672 if (ret) 2673 return ret; 2674 dev_priv->gpu_error.missed_irq_rings = val; 2675 mutex_unlock(&dev->struct_mutex); 2676 2677 return 0; 2678 } 2679 2680 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 2681 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 2682 "0x%08llx\n"); 2683 2684 static int 2685 i915_ring_test_irq_get(void *data, u64 *val) 2686 { 2687 struct drm_device *dev = data; 2688 struct drm_i915_private *dev_priv = dev->dev_private; 2689 2690 *val = dev_priv->gpu_error.test_irq_rings; 2691 2692 return 0; 2693 } 2694 2695 static int 2696 i915_ring_test_irq_set(void *data, u64 val) 2697 { 2698 struct drm_device *dev = data; 2699 struct drm_i915_private *dev_priv = dev->dev_private; 2700 int ret; 2701 2702 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 2703 2704 /* Lock against concurrent debugfs callers */ 2705 ret = mutex_lock_interruptible(&dev->struct_mutex); 2706 if (ret) 2707 return ret; 2708 2709 dev_priv->gpu_error.test_irq_rings = val; 2710 mutex_unlock(&dev->struct_mutex); 2711 2712 return 0; 2713 } 2714 2715 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 2716 i915_ring_test_irq_get, i915_ring_test_irq_set, 2717 "0x%08llx\n"); 2718 2719 #define DROP_UNBOUND 0x1 2720 #define DROP_BOUND 0x2 2721 #define DROP_RETIRE 0x4 2722 #define DROP_ACTIVE 0x8 2723 #define DROP_ALL (DROP_UNBOUND | \ 2724 DROP_BOUND | \ 2725 DROP_RETIRE | \ 2726 DROP_ACTIVE) 2727 static int 2728 i915_drop_caches_get(void *data, u64 *val) 2729 { 2730 *val = DROP_ALL; 2731 2732 return 0; 2733 } 2734 2735 static int 2736 i915_drop_caches_set(void *data, u64 val) 2737 { 2738 struct drm_device *dev = data; 2739 struct drm_i915_private *dev_priv = dev->dev_private; 2740 struct drm_i915_gem_object *obj, *next; 2741 struct i915_address_space *vm; 2742 struct i915_vma *vma, *x; 2743 int ret; 2744 2745 DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); 2746 2747 /* No need to check and wait for gpu resets, only libdrm auto-restarts 2748 * on ioctls on -EAGAIN. */ 2749 ret = mutex_lock_interruptible(&dev->struct_mutex); 2750 if (ret) 2751 return ret; 2752 2753 if (val & DROP_ACTIVE) { 2754 ret = i915_gpu_idle(dev); 2755 if (ret) 2756 goto unlock; 2757 } 2758 2759 if (val & (DROP_RETIRE | DROP_ACTIVE)) 2760 i915_gem_retire_requests(dev); 2761 2762 if (val & DROP_BOUND) { 2763 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 2764 list_for_each_entry_safe(vma, x, &vm->inactive_list, 2765 mm_list) { 2766 if (vma->obj->pin_count) 2767 continue; 2768 2769 ret = i915_vma_unbind(vma); 2770 if (ret) 2771 goto unlock; 2772 } 2773 } 2774 } 2775 2776 if (val & DROP_UNBOUND) { 2777 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, 2778 global_list) 2779 if (obj->pages_pin_count == 0) { 2780 ret = i915_gem_object_put_pages(obj); 2781 if (ret) 2782 goto unlock; 2783 } 2784 } 2785 2786 unlock: 2787 mutex_unlock(&dev->struct_mutex); 2788 2789 return ret; 2790 } 2791 2792 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 2793 i915_drop_caches_get, i915_drop_caches_set, 2794 "0x%08llx\n"); 2795 2796 static int 2797 i915_max_freq_get(void *data, u64 *val) 2798 { 2799 struct drm_device *dev = data; 2800 drm_i915_private_t *dev_priv = dev->dev_private; 2801 int ret; 2802 2803 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2804 return -ENODEV; 2805 2806 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 2807 2808 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2809 if (ret) 2810 return ret; 2811 2812 if (IS_VALLEYVIEW(dev)) 2813 *val = vlv_gpu_freq(dev_priv->mem_freq, 2814 dev_priv->rps.max_delay); 2815 else 2816 *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; 2817 mutex_unlock(&dev_priv->rps.hw_lock); 2818 2819 return 0; 2820 } 2821 2822 static int 2823 i915_max_freq_set(void *data, u64 val) 2824 { 2825 struct drm_device *dev = data; 2826 struct drm_i915_private *dev_priv = dev->dev_private; 2827 int ret; 2828 2829 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2830 return -ENODEV; 2831 2832 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 2833 2834 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 2835 2836 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2837 if (ret) 2838 return ret; 2839 2840 /* 2841 * Turbo will still be enabled, but won't go above the set value. 2842 */ 2843 if (IS_VALLEYVIEW(dev)) { 2844 val = vlv_freq_opcode(dev_priv->mem_freq, val); 2845 dev_priv->rps.max_delay = val; 2846 gen6_set_rps(dev, val); 2847 } else { 2848 do_div(val, GT_FREQUENCY_MULTIPLIER); 2849 dev_priv->rps.max_delay = val; 2850 gen6_set_rps(dev, val); 2851 } 2852 2853 mutex_unlock(&dev_priv->rps.hw_lock); 2854 2855 return 0; 2856 } 2857 2858 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 2859 i915_max_freq_get, i915_max_freq_set, 2860 "%llu\n"); 2861 2862 static int 2863 i915_min_freq_get(void *data, u64 *val) 2864 { 2865 struct drm_device *dev = data; 2866 drm_i915_private_t *dev_priv = dev->dev_private; 2867 int ret; 2868 2869 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2870 return -ENODEV; 2871 2872 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 2873 2874 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2875 if (ret) 2876 return ret; 2877 2878 if (IS_VALLEYVIEW(dev)) 2879 *val = vlv_gpu_freq(dev_priv->mem_freq, 2880 dev_priv->rps.min_delay); 2881 else 2882 *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; 2883 mutex_unlock(&dev_priv->rps.hw_lock); 2884 2885 return 0; 2886 } 2887 2888 static int 2889 i915_min_freq_set(void *data, u64 val) 2890 { 2891 struct drm_device *dev = data; 2892 struct drm_i915_private *dev_priv = dev->dev_private; 2893 int ret; 2894 2895 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2896 return -ENODEV; 2897 2898 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 2899 2900 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 2901 2902 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2903 if (ret) 2904 return ret; 2905 2906 /* 2907 * Turbo will still be enabled, but won't go below the set value. 2908 */ 2909 if (IS_VALLEYVIEW(dev)) { 2910 val = vlv_freq_opcode(dev_priv->mem_freq, val); 2911 dev_priv->rps.min_delay = val; 2912 valleyview_set_rps(dev, val); 2913 } else { 2914 do_div(val, GT_FREQUENCY_MULTIPLIER); 2915 dev_priv->rps.min_delay = val; 2916 gen6_set_rps(dev, val); 2917 } 2918 mutex_unlock(&dev_priv->rps.hw_lock); 2919 2920 return 0; 2921 } 2922 2923 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 2924 i915_min_freq_get, i915_min_freq_set, 2925 "%llu\n"); 2926 2927 static int 2928 i915_cache_sharing_get(void *data, u64 *val) 2929 { 2930 struct drm_device *dev = data; 2931 drm_i915_private_t *dev_priv = dev->dev_private; 2932 u32 snpcr; 2933 int ret; 2934 2935 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2936 return -ENODEV; 2937 2938 ret = mutex_lock_interruptible(&dev->struct_mutex); 2939 if (ret) 2940 return ret; 2941 2942 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 2943 mutex_unlock(&dev_priv->dev->struct_mutex); 2944 2945 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 2946 2947 return 0; 2948 } 2949 2950 static int 2951 i915_cache_sharing_set(void *data, u64 val) 2952 { 2953 struct drm_device *dev = data; 2954 struct drm_i915_private *dev_priv = dev->dev_private; 2955 u32 snpcr; 2956 2957 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2958 return -ENODEV; 2959 2960 if (val > 3) 2961 return -EINVAL; 2962 2963 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 2964 2965 /* Update the cache sharing policy here as well */ 2966 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 2967 snpcr &= ~GEN6_MBC_SNPCR_MASK; 2968 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 2969 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 2970 2971 return 0; 2972 } 2973 2974 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 2975 i915_cache_sharing_get, i915_cache_sharing_set, 2976 "%llu\n"); 2977 2978 static int i915_forcewake_open(struct inode *inode, struct file *file) 2979 { 2980 struct drm_device *dev = inode->i_private; 2981 struct drm_i915_private *dev_priv = dev->dev_private; 2982 2983 if (INTEL_INFO(dev)->gen < 6) 2984 return 0; 2985 2986 gen6_gt_force_wake_get(dev_priv); 2987 2988 return 0; 2989 } 2990 2991 static int i915_forcewake_release(struct inode *inode, struct file *file) 2992 { 2993 struct drm_device *dev = inode->i_private; 2994 struct drm_i915_private *dev_priv = dev->dev_private; 2995 2996 if (INTEL_INFO(dev)->gen < 6) 2997 return 0; 2998 2999 gen6_gt_force_wake_put(dev_priv); 3000 3001 return 0; 3002 } 3003 3004 static const struct file_operations i915_forcewake_fops = { 3005 .owner = THIS_MODULE, 3006 .open = i915_forcewake_open, 3007 .release = i915_forcewake_release, 3008 }; 3009 3010 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 3011 { 3012 struct drm_device *dev = minor->dev; 3013 struct dentry *ent; 3014 3015 ent = debugfs_create_file("i915_forcewake_user", 3016 S_IRUSR, 3017 root, dev, 3018 &i915_forcewake_fops); 3019 if (IS_ERR(ent)) 3020 return PTR_ERR(ent); 3021 3022 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 3023 } 3024 3025 static int i915_debugfs_create(struct dentry *root, 3026 struct drm_minor *minor, 3027 const char *name, 3028 const struct file_operations *fops) 3029 { 3030 struct drm_device *dev = minor->dev; 3031 struct dentry *ent; 3032 3033 ent = debugfs_create_file(name, 3034 S_IRUGO | S_IWUSR, 3035 root, dev, 3036 fops); 3037 if (IS_ERR(ent)) 3038 return PTR_ERR(ent); 3039 3040 return drm_add_fake_info_node(minor, ent, fops); 3041 } 3042 3043 static const struct drm_info_list i915_debugfs_list[] = { 3044 {"i915_capabilities", i915_capabilities, 0}, 3045 {"i915_gem_objects", i915_gem_object_info, 0}, 3046 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 3047 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 3048 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 3049 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 3050 {"i915_gem_stolen", i915_gem_stolen_list_info }, 3051 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 3052 {"i915_gem_request", i915_gem_request_info, 0}, 3053 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 3054 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 3055 {"i915_gem_interrupt", i915_interrupt_info, 0}, 3056 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 3057 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 3058 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 3059 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, 3060 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 3061 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 3062 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 3063 {"i915_inttoext_table", i915_inttoext_table, 0}, 3064 {"i915_drpc_info", i915_drpc_info, 0}, 3065 {"i915_emon_status", i915_emon_status, 0}, 3066 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 3067 {"i915_gfxec", i915_gfxec, 0}, 3068 {"i915_fbc_status", i915_fbc_status, 0}, 3069 {"i915_ips_status", i915_ips_status, 0}, 3070 {"i915_sr_status", i915_sr_status, 0}, 3071 {"i915_opregion", i915_opregion, 0}, 3072 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 3073 {"i915_context_status", i915_context_status, 0}, 3074 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 3075 {"i915_swizzle_info", i915_swizzle_info, 0}, 3076 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 3077 {"i915_dpio", i915_dpio_info, 0}, 3078 {"i915_llc", i915_llc, 0}, 3079 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 3080 {"i915_energy_uJ", i915_energy_uJ, 0}, 3081 {"i915_pc8_status", i915_pc8_status, 0}, 3082 }; 3083 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 3084 3085 static const struct i915_debugfs_files { 3086 const char *name; 3087 const struct file_operations *fops; 3088 } i915_debugfs_files[] = { 3089 {"i915_wedged", &i915_wedged_fops}, 3090 {"i915_max_freq", &i915_max_freq_fops}, 3091 {"i915_min_freq", &i915_min_freq_fops}, 3092 {"i915_cache_sharing", &i915_cache_sharing_fops}, 3093 {"i915_ring_stop", &i915_ring_stop_fops}, 3094 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 3095 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 3096 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 3097 {"i915_error_state", &i915_error_state_fops}, 3098 {"i915_next_seqno", &i915_next_seqno_fops}, 3099 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 3100 }; 3101 3102 void intel_display_crc_init(struct drm_device *dev) 3103 { 3104 struct drm_i915_private *dev_priv = dev->dev_private; 3105 int i; 3106 3107 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { 3108 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i]; 3109 3110 pipe_crc->opened = false; 3111 spin_lock_init(&pipe_crc->lock); 3112 init_waitqueue_head(&pipe_crc->wq); 3113 } 3114 } 3115 3116 int i915_debugfs_init(struct drm_minor *minor) 3117 { 3118 int ret, i; 3119 3120 ret = i915_forcewake_create(minor->debugfs_root, minor); 3121 if (ret) 3122 return ret; 3123 3124 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 3125 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); 3126 if (ret) 3127 return ret; 3128 } 3129 3130 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 3131 ret = i915_debugfs_create(minor->debugfs_root, minor, 3132 i915_debugfs_files[i].name, 3133 i915_debugfs_files[i].fops); 3134 if (ret) 3135 return ret; 3136 } 3137 3138 return drm_debugfs_create_files(i915_debugfs_list, 3139 I915_DEBUGFS_ENTRIES, 3140 minor->debugfs_root, minor); 3141 } 3142 3143 void i915_debugfs_cleanup(struct drm_minor *minor) 3144 { 3145 int i; 3146 3147 drm_debugfs_remove_files(i915_debugfs_list, 3148 I915_DEBUGFS_ENTRIES, minor); 3149 3150 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 3151 1, minor); 3152 3153 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 3154 struct drm_info_list *info_list = 3155 (struct drm_info_list *)&i915_pipe_crc_data[i]; 3156 3157 drm_debugfs_remove_files(info_list, 1, minor); 3158 } 3159 3160 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 3161 struct drm_info_list *info_list = 3162 (struct drm_info_list *) i915_debugfs_files[i].fops; 3163 3164 drm_debugfs_remove_files(info_list, 1, minor); 3165 } 3166 } 3167 3168 #endif /* CONFIG_DEBUG_FS */ 3169