1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/debugfs.h> 31 #include <linux/slab.h> 32 #include <linux/export.h> 33 #include <generated/utsrelease.h> 34 #include <drm/drmP.h> 35 #include "intel_drv.h" 36 #include "intel_ringbuffer.h" 37 #include <drm/i915_drm.h> 38 #include "i915_drv.h" 39 40 #define DRM_I915_RING_DEBUG 1 41 42 43 #if defined(CONFIG_DEBUG_FS) 44 45 enum { 46 ACTIVE_LIST, 47 INACTIVE_LIST, 48 PINNED_LIST, 49 }; 50 51 static const char *yesno(int v) 52 { 53 return v ? "yes" : "no"; 54 } 55 56 static int i915_capabilities(struct seq_file *m, void *data) 57 { 58 struct drm_info_node *node = (struct drm_info_node *) m->private; 59 struct drm_device *dev = node->minor->dev; 60 const struct intel_device_info *info = INTEL_INFO(dev); 61 62 seq_printf(m, "gen: %d\n", info->gen); 63 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 64 #define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 65 #define DEV_INFO_SEP ; 66 DEV_INFO_FLAGS; 67 #undef DEV_INFO_FLAG 68 #undef DEV_INFO_SEP 69 70 return 0; 71 } 72 73 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 74 { 75 if (obj->user_pin_count > 0) 76 return "P"; 77 else if (obj->pin_count > 0) 78 return "p"; 79 else 80 return " "; 81 } 82 83 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 84 { 85 switch (obj->tiling_mode) { 86 default: 87 case I915_TILING_NONE: return " "; 88 case I915_TILING_X: return "X"; 89 case I915_TILING_Y: return "Y"; 90 } 91 } 92 93 static const char *cache_level_str(int type) 94 { 95 switch (type) { 96 case I915_CACHE_NONE: return " uncached"; 97 case I915_CACHE_LLC: return " snooped (LLC)"; 98 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 99 default: return ""; 100 } 101 } 102 103 static void 104 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 105 { 106 seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s", 107 &obj->base, 108 get_pin_flag(obj), 109 get_tiling_flag(obj), 110 obj->base.size / 1024, 111 obj->base.read_domains, 112 obj->base.write_domain, 113 obj->last_read_seqno, 114 obj->last_write_seqno, 115 obj->last_fenced_seqno, 116 cache_level_str(obj->cache_level), 117 obj->dirty ? " dirty" : "", 118 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 119 if (obj->base.name) 120 seq_printf(m, " (name: %d)", obj->base.name); 121 if (obj->pin_count) 122 seq_printf(m, " (pinned x %d)", obj->pin_count); 123 if (obj->fence_reg != I915_FENCE_REG_NONE) 124 seq_printf(m, " (fence: %d)", obj->fence_reg); 125 if (obj->gtt_space != NULL) 126 seq_printf(m, " (gtt offset: %08x, size: %08x)", 127 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 128 if (obj->pin_mappable || obj->fault_mappable) { 129 char s[3], *t = s; 130 if (obj->pin_mappable) 131 *t++ = 'p'; 132 if (obj->fault_mappable) 133 *t++ = 'f'; 134 *t = '\0'; 135 seq_printf(m, " (%s mappable)", s); 136 } 137 if (obj->ring != NULL) 138 seq_printf(m, " (%s)", obj->ring->name); 139 } 140 141 static int i915_gem_object_list_info(struct seq_file *m, void *data) 142 { 143 struct drm_info_node *node = (struct drm_info_node *) m->private; 144 uintptr_t list = (uintptr_t) node->info_ent->data; 145 struct list_head *head; 146 struct drm_device *dev = node->minor->dev; 147 drm_i915_private_t *dev_priv = dev->dev_private; 148 struct drm_i915_gem_object *obj; 149 size_t total_obj_size, total_gtt_size; 150 int count, ret; 151 152 ret = mutex_lock_interruptible(&dev->struct_mutex); 153 if (ret) 154 return ret; 155 156 switch (list) { 157 case ACTIVE_LIST: 158 seq_printf(m, "Active:\n"); 159 head = &dev_priv->mm.active_list; 160 break; 161 case INACTIVE_LIST: 162 seq_printf(m, "Inactive:\n"); 163 head = &dev_priv->mm.inactive_list; 164 break; 165 default: 166 mutex_unlock(&dev->struct_mutex); 167 return -EINVAL; 168 } 169 170 total_obj_size = total_gtt_size = count = 0; 171 list_for_each_entry(obj, head, mm_list) { 172 seq_printf(m, " "); 173 describe_obj(m, obj); 174 seq_printf(m, "\n"); 175 total_obj_size += obj->base.size; 176 total_gtt_size += obj->gtt_space->size; 177 count++; 178 } 179 mutex_unlock(&dev->struct_mutex); 180 181 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 182 count, total_obj_size, total_gtt_size); 183 return 0; 184 } 185 186 #define count_objects(list, member) do { \ 187 list_for_each_entry(obj, list, member) { \ 188 size += obj->gtt_space->size; \ 189 ++count; \ 190 if (obj->map_and_fenceable) { \ 191 mappable_size += obj->gtt_space->size; \ 192 ++mappable_count; \ 193 } \ 194 } \ 195 } while (0) 196 197 static int i915_gem_object_info(struct seq_file *m, void* data) 198 { 199 struct drm_info_node *node = (struct drm_info_node *) m->private; 200 struct drm_device *dev = node->minor->dev; 201 struct drm_i915_private *dev_priv = dev->dev_private; 202 u32 count, mappable_count, purgeable_count; 203 size_t size, mappable_size, purgeable_size; 204 struct drm_i915_gem_object *obj; 205 int ret; 206 207 ret = mutex_lock_interruptible(&dev->struct_mutex); 208 if (ret) 209 return ret; 210 211 seq_printf(m, "%u objects, %zu bytes\n", 212 dev_priv->mm.object_count, 213 dev_priv->mm.object_memory); 214 215 size = count = mappable_size = mappable_count = 0; 216 count_objects(&dev_priv->mm.bound_list, gtt_list); 217 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 218 count, mappable_count, size, mappable_size); 219 220 size = count = mappable_size = mappable_count = 0; 221 count_objects(&dev_priv->mm.active_list, mm_list); 222 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 223 count, mappable_count, size, mappable_size); 224 225 size = count = mappable_size = mappable_count = 0; 226 count_objects(&dev_priv->mm.inactive_list, mm_list); 227 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 228 count, mappable_count, size, mappable_size); 229 230 size = count = purgeable_size = purgeable_count = 0; 231 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) { 232 size += obj->base.size, ++count; 233 if (obj->madv == I915_MADV_DONTNEED) 234 purgeable_size += obj->base.size, ++purgeable_count; 235 } 236 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); 237 238 size = count = mappable_size = mappable_count = 0; 239 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 240 if (obj->fault_mappable) { 241 size += obj->gtt_space->size; 242 ++count; 243 } 244 if (obj->pin_mappable) { 245 mappable_size += obj->gtt_space->size; 246 ++mappable_count; 247 } 248 if (obj->madv == I915_MADV_DONTNEED) { 249 purgeable_size += obj->base.size; 250 ++purgeable_count; 251 } 252 } 253 seq_printf(m, "%u purgeable objects, %zu bytes\n", 254 purgeable_count, purgeable_size); 255 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 256 mappable_count, mappable_size); 257 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 258 count, size); 259 260 seq_printf(m, "%zu [%zu] gtt total\n", 261 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); 262 263 mutex_unlock(&dev->struct_mutex); 264 265 return 0; 266 } 267 268 static int i915_gem_gtt_info(struct seq_file *m, void* data) 269 { 270 struct drm_info_node *node = (struct drm_info_node *) m->private; 271 struct drm_device *dev = node->minor->dev; 272 uintptr_t list = (uintptr_t) node->info_ent->data; 273 struct drm_i915_private *dev_priv = dev->dev_private; 274 struct drm_i915_gem_object *obj; 275 size_t total_obj_size, total_gtt_size; 276 int count, ret; 277 278 ret = mutex_lock_interruptible(&dev->struct_mutex); 279 if (ret) 280 return ret; 281 282 total_obj_size = total_gtt_size = count = 0; 283 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 284 if (list == PINNED_LIST && obj->pin_count == 0) 285 continue; 286 287 seq_printf(m, " "); 288 describe_obj(m, obj); 289 seq_printf(m, "\n"); 290 total_obj_size += obj->base.size; 291 total_gtt_size += obj->gtt_space->size; 292 count++; 293 } 294 295 mutex_unlock(&dev->struct_mutex); 296 297 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 298 count, total_obj_size, total_gtt_size); 299 300 return 0; 301 } 302 303 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 304 { 305 struct drm_info_node *node = (struct drm_info_node *) m->private; 306 struct drm_device *dev = node->minor->dev; 307 unsigned long flags; 308 struct intel_crtc *crtc; 309 310 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 311 const char pipe = pipe_name(crtc->pipe); 312 const char plane = plane_name(crtc->plane); 313 struct intel_unpin_work *work; 314 315 spin_lock_irqsave(&dev->event_lock, flags); 316 work = crtc->unpin_work; 317 if (work == NULL) { 318 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 319 pipe, plane); 320 } else { 321 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 322 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 323 pipe, plane); 324 } else { 325 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 326 pipe, plane); 327 } 328 if (work->enable_stall_check) 329 seq_printf(m, "Stall check enabled, "); 330 else 331 seq_printf(m, "Stall check waiting for page flip ioctl, "); 332 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 333 334 if (work->old_fb_obj) { 335 struct drm_i915_gem_object *obj = work->old_fb_obj; 336 if (obj) 337 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 338 } 339 if (work->pending_flip_obj) { 340 struct drm_i915_gem_object *obj = work->pending_flip_obj; 341 if (obj) 342 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 343 } 344 } 345 spin_unlock_irqrestore(&dev->event_lock, flags); 346 } 347 348 return 0; 349 } 350 351 static int i915_gem_request_info(struct seq_file *m, void *data) 352 { 353 struct drm_info_node *node = (struct drm_info_node *) m->private; 354 struct drm_device *dev = node->minor->dev; 355 drm_i915_private_t *dev_priv = dev->dev_private; 356 struct intel_ring_buffer *ring; 357 struct drm_i915_gem_request *gem_request; 358 int ret, count, i; 359 360 ret = mutex_lock_interruptible(&dev->struct_mutex); 361 if (ret) 362 return ret; 363 364 count = 0; 365 for_each_ring(ring, dev_priv, i) { 366 if (list_empty(&ring->request_list)) 367 continue; 368 369 seq_printf(m, "%s requests:\n", ring->name); 370 list_for_each_entry(gem_request, 371 &ring->request_list, 372 list) { 373 seq_printf(m, " %d @ %d\n", 374 gem_request->seqno, 375 (int) (jiffies - gem_request->emitted_jiffies)); 376 } 377 count++; 378 } 379 mutex_unlock(&dev->struct_mutex); 380 381 if (count == 0) 382 seq_printf(m, "No requests\n"); 383 384 return 0; 385 } 386 387 static void i915_ring_seqno_info(struct seq_file *m, 388 struct intel_ring_buffer *ring) 389 { 390 if (ring->get_seqno) { 391 seq_printf(m, "Current sequence (%s): %d\n", 392 ring->name, ring->get_seqno(ring, false)); 393 } 394 } 395 396 static int i915_gem_seqno_info(struct seq_file *m, void *data) 397 { 398 struct drm_info_node *node = (struct drm_info_node *) m->private; 399 struct drm_device *dev = node->minor->dev; 400 drm_i915_private_t *dev_priv = dev->dev_private; 401 struct intel_ring_buffer *ring; 402 int ret, i; 403 404 ret = mutex_lock_interruptible(&dev->struct_mutex); 405 if (ret) 406 return ret; 407 408 for_each_ring(ring, dev_priv, i) 409 i915_ring_seqno_info(m, ring); 410 411 mutex_unlock(&dev->struct_mutex); 412 413 return 0; 414 } 415 416 417 static int i915_interrupt_info(struct seq_file *m, void *data) 418 { 419 struct drm_info_node *node = (struct drm_info_node *) m->private; 420 struct drm_device *dev = node->minor->dev; 421 drm_i915_private_t *dev_priv = dev->dev_private; 422 struct intel_ring_buffer *ring; 423 int ret, i, pipe; 424 425 ret = mutex_lock_interruptible(&dev->struct_mutex); 426 if (ret) 427 return ret; 428 429 if (IS_VALLEYVIEW(dev)) { 430 seq_printf(m, "Display IER:\t%08x\n", 431 I915_READ(VLV_IER)); 432 seq_printf(m, "Display IIR:\t%08x\n", 433 I915_READ(VLV_IIR)); 434 seq_printf(m, "Display IIR_RW:\t%08x\n", 435 I915_READ(VLV_IIR_RW)); 436 seq_printf(m, "Display IMR:\t%08x\n", 437 I915_READ(VLV_IMR)); 438 for_each_pipe(pipe) 439 seq_printf(m, "Pipe %c stat:\t%08x\n", 440 pipe_name(pipe), 441 I915_READ(PIPESTAT(pipe))); 442 443 seq_printf(m, "Master IER:\t%08x\n", 444 I915_READ(VLV_MASTER_IER)); 445 446 seq_printf(m, "Render IER:\t%08x\n", 447 I915_READ(GTIER)); 448 seq_printf(m, "Render IIR:\t%08x\n", 449 I915_READ(GTIIR)); 450 seq_printf(m, "Render IMR:\t%08x\n", 451 I915_READ(GTIMR)); 452 453 seq_printf(m, "PM IER:\t\t%08x\n", 454 I915_READ(GEN6_PMIER)); 455 seq_printf(m, "PM IIR:\t\t%08x\n", 456 I915_READ(GEN6_PMIIR)); 457 seq_printf(m, "PM IMR:\t\t%08x\n", 458 I915_READ(GEN6_PMIMR)); 459 460 seq_printf(m, "Port hotplug:\t%08x\n", 461 I915_READ(PORT_HOTPLUG_EN)); 462 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 463 I915_READ(VLV_DPFLIPSTAT)); 464 seq_printf(m, "DPINVGTT:\t%08x\n", 465 I915_READ(DPINVGTT)); 466 467 } else if (!HAS_PCH_SPLIT(dev)) { 468 seq_printf(m, "Interrupt enable: %08x\n", 469 I915_READ(IER)); 470 seq_printf(m, "Interrupt identity: %08x\n", 471 I915_READ(IIR)); 472 seq_printf(m, "Interrupt mask: %08x\n", 473 I915_READ(IMR)); 474 for_each_pipe(pipe) 475 seq_printf(m, "Pipe %c stat: %08x\n", 476 pipe_name(pipe), 477 I915_READ(PIPESTAT(pipe))); 478 } else { 479 seq_printf(m, "North Display Interrupt enable: %08x\n", 480 I915_READ(DEIER)); 481 seq_printf(m, "North Display Interrupt identity: %08x\n", 482 I915_READ(DEIIR)); 483 seq_printf(m, "North Display Interrupt mask: %08x\n", 484 I915_READ(DEIMR)); 485 seq_printf(m, "South Display Interrupt enable: %08x\n", 486 I915_READ(SDEIER)); 487 seq_printf(m, "South Display Interrupt identity: %08x\n", 488 I915_READ(SDEIIR)); 489 seq_printf(m, "South Display Interrupt mask: %08x\n", 490 I915_READ(SDEIMR)); 491 seq_printf(m, "Graphics Interrupt enable: %08x\n", 492 I915_READ(GTIER)); 493 seq_printf(m, "Graphics Interrupt identity: %08x\n", 494 I915_READ(GTIIR)); 495 seq_printf(m, "Graphics Interrupt mask: %08x\n", 496 I915_READ(GTIMR)); 497 } 498 seq_printf(m, "Interrupts received: %d\n", 499 atomic_read(&dev_priv->irq_received)); 500 for_each_ring(ring, dev_priv, i) { 501 if (IS_GEN6(dev) || IS_GEN7(dev)) { 502 seq_printf(m, 503 "Graphics Interrupt mask (%s): %08x\n", 504 ring->name, I915_READ_IMR(ring)); 505 } 506 i915_ring_seqno_info(m, ring); 507 } 508 mutex_unlock(&dev->struct_mutex); 509 510 return 0; 511 } 512 513 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 514 { 515 struct drm_info_node *node = (struct drm_info_node *) m->private; 516 struct drm_device *dev = node->minor->dev; 517 drm_i915_private_t *dev_priv = dev->dev_private; 518 int i, ret; 519 520 ret = mutex_lock_interruptible(&dev->struct_mutex); 521 if (ret) 522 return ret; 523 524 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 525 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 526 for (i = 0; i < dev_priv->num_fence_regs; i++) { 527 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 528 529 seq_printf(m, "Fence %d, pin count = %d, object = ", 530 i, dev_priv->fence_regs[i].pin_count); 531 if (obj == NULL) 532 seq_printf(m, "unused"); 533 else 534 describe_obj(m, obj); 535 seq_printf(m, "\n"); 536 } 537 538 mutex_unlock(&dev->struct_mutex); 539 return 0; 540 } 541 542 static int i915_hws_info(struct seq_file *m, void *data) 543 { 544 struct drm_info_node *node = (struct drm_info_node *) m->private; 545 struct drm_device *dev = node->minor->dev; 546 drm_i915_private_t *dev_priv = dev->dev_private; 547 struct intel_ring_buffer *ring; 548 const volatile u32 __iomem *hws; 549 int i; 550 551 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 552 hws = (volatile u32 __iomem *)ring->status_page.page_addr; 553 if (hws == NULL) 554 return 0; 555 556 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 557 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 558 i * 4, 559 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 560 } 561 return 0; 562 } 563 564 static const char *ring_str(int ring) 565 { 566 switch (ring) { 567 case RCS: return "render"; 568 case VCS: return "bsd"; 569 case BCS: return "blt"; 570 default: return ""; 571 } 572 } 573 574 static const char *pin_flag(int pinned) 575 { 576 if (pinned > 0) 577 return " P"; 578 else if (pinned < 0) 579 return " p"; 580 else 581 return ""; 582 } 583 584 static const char *tiling_flag(int tiling) 585 { 586 switch (tiling) { 587 default: 588 case I915_TILING_NONE: return ""; 589 case I915_TILING_X: return " X"; 590 case I915_TILING_Y: return " Y"; 591 } 592 } 593 594 static const char *dirty_flag(int dirty) 595 { 596 return dirty ? " dirty" : ""; 597 } 598 599 static const char *purgeable_flag(int purgeable) 600 { 601 return purgeable ? " purgeable" : ""; 602 } 603 604 static void print_error_buffers(struct seq_file *m, 605 const char *name, 606 struct drm_i915_error_buffer *err, 607 int count) 608 { 609 seq_printf(m, "%s [%d]:\n", name, count); 610 611 while (count--) { 612 seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s", 613 err->gtt_offset, 614 err->size, 615 err->read_domains, 616 err->write_domain, 617 err->rseqno, err->wseqno, 618 pin_flag(err->pinned), 619 tiling_flag(err->tiling), 620 dirty_flag(err->dirty), 621 purgeable_flag(err->purgeable), 622 err->ring != -1 ? " " : "", 623 ring_str(err->ring), 624 cache_level_str(err->cache_level)); 625 626 if (err->name) 627 seq_printf(m, " (name: %d)", err->name); 628 if (err->fence_reg != I915_FENCE_REG_NONE) 629 seq_printf(m, " (fence: %d)", err->fence_reg); 630 631 seq_printf(m, "\n"); 632 err++; 633 } 634 } 635 636 static void i915_ring_error_state(struct seq_file *m, 637 struct drm_device *dev, 638 struct drm_i915_error_state *error, 639 unsigned ring) 640 { 641 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ 642 seq_printf(m, "%s command stream:\n", ring_str(ring)); 643 seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 644 seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 645 seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); 646 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 647 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 648 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 649 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 650 if (ring == RCS && INTEL_INFO(dev)->gen >= 4) 651 seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); 652 653 if (INTEL_INFO(dev)->gen >= 4) 654 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 655 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 656 seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); 657 if (INTEL_INFO(dev)->gen >= 6) { 658 seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); 659 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 660 seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", 661 error->semaphore_mboxes[ring][0], 662 error->semaphore_seqno[ring][0]); 663 seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", 664 error->semaphore_mboxes[ring][1], 665 error->semaphore_seqno[ring][1]); 666 } 667 seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 668 seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 669 seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 670 seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 671 } 672 673 struct i915_error_state_file_priv { 674 struct drm_device *dev; 675 struct drm_i915_error_state *error; 676 }; 677 678 static int i915_error_state(struct seq_file *m, void *unused) 679 { 680 struct i915_error_state_file_priv *error_priv = m->private; 681 struct drm_device *dev = error_priv->dev; 682 drm_i915_private_t *dev_priv = dev->dev_private; 683 struct drm_i915_error_state *error = error_priv->error; 684 struct intel_ring_buffer *ring; 685 int i, j, page, offset, elt; 686 687 if (!error) { 688 seq_printf(m, "no error state collected\n"); 689 return 0; 690 } 691 692 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 693 error->time.tv_usec); 694 seq_printf(m, "Kernel: " UTS_RELEASE); 695 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 696 seq_printf(m, "EIR: 0x%08x\n", error->eir); 697 seq_printf(m, "IER: 0x%08x\n", error->ier); 698 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 699 seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); 700 seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr); 701 seq_printf(m, "CCID: 0x%08x\n", error->ccid); 702 703 for (i = 0; i < dev_priv->num_fence_regs; i++) 704 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 705 706 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) 707 seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]); 708 709 if (INTEL_INFO(dev)->gen >= 6) { 710 seq_printf(m, "ERROR: 0x%08x\n", error->error); 711 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 712 } 713 714 if (INTEL_INFO(dev)->gen == 7) 715 seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 716 717 for_each_ring(ring, dev_priv, i) 718 i915_ring_error_state(m, dev, error, i); 719 720 if (error->active_bo) 721 print_error_buffers(m, "Active", 722 error->active_bo, 723 error->active_bo_count); 724 725 if (error->pinned_bo) 726 print_error_buffers(m, "Pinned", 727 error->pinned_bo, 728 error->pinned_bo_count); 729 730 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 731 struct drm_i915_error_object *obj; 732 733 if ((obj = error->ring[i].batchbuffer)) { 734 seq_printf(m, "%s --- gtt_offset = 0x%08x\n", 735 dev_priv->ring[i].name, 736 obj->gtt_offset); 737 offset = 0; 738 for (page = 0; page < obj->page_count; page++) { 739 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 740 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); 741 offset += 4; 742 } 743 } 744 } 745 746 if (error->ring[i].num_requests) { 747 seq_printf(m, "%s --- %d requests\n", 748 dev_priv->ring[i].name, 749 error->ring[i].num_requests); 750 for (j = 0; j < error->ring[i].num_requests; j++) { 751 seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", 752 error->ring[i].requests[j].seqno, 753 error->ring[i].requests[j].jiffies, 754 error->ring[i].requests[j].tail); 755 } 756 } 757 758 if ((obj = error->ring[i].ringbuffer)) { 759 seq_printf(m, "%s --- ringbuffer = 0x%08x\n", 760 dev_priv->ring[i].name, 761 obj->gtt_offset); 762 offset = 0; 763 for (page = 0; page < obj->page_count; page++) { 764 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 765 seq_printf(m, "%08x : %08x\n", 766 offset, 767 obj->pages[page][elt]); 768 offset += 4; 769 } 770 } 771 } 772 } 773 774 if (error->overlay) 775 intel_overlay_print_error_state(m, error->overlay); 776 777 if (error->display) 778 intel_display_print_error_state(m, dev, error->display); 779 780 return 0; 781 } 782 783 static ssize_t 784 i915_error_state_write(struct file *filp, 785 const char __user *ubuf, 786 size_t cnt, 787 loff_t *ppos) 788 { 789 struct seq_file *m = filp->private_data; 790 struct i915_error_state_file_priv *error_priv = m->private; 791 struct drm_device *dev = error_priv->dev; 792 int ret; 793 794 DRM_DEBUG_DRIVER("Resetting error state\n"); 795 796 ret = mutex_lock_interruptible(&dev->struct_mutex); 797 if (ret) 798 return ret; 799 800 i915_destroy_error_state(dev); 801 mutex_unlock(&dev->struct_mutex); 802 803 return cnt; 804 } 805 806 static int i915_error_state_open(struct inode *inode, struct file *file) 807 { 808 struct drm_device *dev = inode->i_private; 809 drm_i915_private_t *dev_priv = dev->dev_private; 810 struct i915_error_state_file_priv *error_priv; 811 unsigned long flags; 812 813 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); 814 if (!error_priv) 815 return -ENOMEM; 816 817 error_priv->dev = dev; 818 819 spin_lock_irqsave(&dev_priv->error_lock, flags); 820 error_priv->error = dev_priv->first_error; 821 if (error_priv->error) 822 kref_get(&error_priv->error->ref); 823 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 824 825 return single_open(file, i915_error_state, error_priv); 826 } 827 828 static int i915_error_state_release(struct inode *inode, struct file *file) 829 { 830 struct seq_file *m = file->private_data; 831 struct i915_error_state_file_priv *error_priv = m->private; 832 833 if (error_priv->error) 834 kref_put(&error_priv->error->ref, i915_error_state_free); 835 kfree(error_priv); 836 837 return single_release(inode, file); 838 } 839 840 static const struct file_operations i915_error_state_fops = { 841 .owner = THIS_MODULE, 842 .open = i915_error_state_open, 843 .read = seq_read, 844 .write = i915_error_state_write, 845 .llseek = default_llseek, 846 .release = i915_error_state_release, 847 }; 848 849 static int i915_rstdby_delays(struct seq_file *m, void *unused) 850 { 851 struct drm_info_node *node = (struct drm_info_node *) m->private; 852 struct drm_device *dev = node->minor->dev; 853 drm_i915_private_t *dev_priv = dev->dev_private; 854 u16 crstanddelay; 855 int ret; 856 857 ret = mutex_lock_interruptible(&dev->struct_mutex); 858 if (ret) 859 return ret; 860 861 crstanddelay = I915_READ16(CRSTANDVID); 862 863 mutex_unlock(&dev->struct_mutex); 864 865 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 866 867 return 0; 868 } 869 870 static int i915_cur_delayinfo(struct seq_file *m, void *unused) 871 { 872 struct drm_info_node *node = (struct drm_info_node *) m->private; 873 struct drm_device *dev = node->minor->dev; 874 drm_i915_private_t *dev_priv = dev->dev_private; 875 int ret; 876 877 if (IS_GEN5(dev)) { 878 u16 rgvswctl = I915_READ16(MEMSWCTL); 879 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 880 881 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 882 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 883 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 884 MEMSTAT_VID_SHIFT); 885 seq_printf(m, "Current P-state: %d\n", 886 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 887 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 888 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 889 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 890 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 891 u32 rpstat; 892 u32 rpupei, rpcurup, rpprevup; 893 u32 rpdownei, rpcurdown, rpprevdown; 894 int max_freq; 895 896 /* RPSTAT1 is in the GT power well */ 897 ret = mutex_lock_interruptible(&dev->struct_mutex); 898 if (ret) 899 return ret; 900 901 gen6_gt_force_wake_get(dev_priv); 902 903 rpstat = I915_READ(GEN6_RPSTAT1); 904 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 905 rpcurup = I915_READ(GEN6_RP_CUR_UP); 906 rpprevup = I915_READ(GEN6_RP_PREV_UP); 907 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 908 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 909 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 910 911 gen6_gt_force_wake_put(dev_priv); 912 mutex_unlock(&dev->struct_mutex); 913 914 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 915 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 916 seq_printf(m, "Render p-state ratio: %d\n", 917 (gt_perf_status & 0xff00) >> 8); 918 seq_printf(m, "Render p-state VID: %d\n", 919 gt_perf_status & 0xff); 920 seq_printf(m, "Render p-state limit: %d\n", 921 rp_state_limits & 0xff); 922 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> 923 GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER); 924 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 925 GEN6_CURICONT_MASK); 926 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 927 GEN6_CURBSYTAVG_MASK); 928 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 929 GEN6_CURBSYTAVG_MASK); 930 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 931 GEN6_CURIAVG_MASK); 932 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 933 GEN6_CURBSYTAVG_MASK); 934 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 935 GEN6_CURBSYTAVG_MASK); 936 937 max_freq = (rp_state_cap & 0xff0000) >> 16; 938 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 939 max_freq * GT_FREQUENCY_MULTIPLIER); 940 941 max_freq = (rp_state_cap & 0xff00) >> 8; 942 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 943 max_freq * GT_FREQUENCY_MULTIPLIER); 944 945 max_freq = rp_state_cap & 0xff; 946 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 947 max_freq * GT_FREQUENCY_MULTIPLIER); 948 } else { 949 seq_printf(m, "no P-state info available\n"); 950 } 951 952 return 0; 953 } 954 955 static int i915_delayfreq_table(struct seq_file *m, void *unused) 956 { 957 struct drm_info_node *node = (struct drm_info_node *) m->private; 958 struct drm_device *dev = node->minor->dev; 959 drm_i915_private_t *dev_priv = dev->dev_private; 960 u32 delayfreq; 961 int ret, i; 962 963 ret = mutex_lock_interruptible(&dev->struct_mutex); 964 if (ret) 965 return ret; 966 967 for (i = 0; i < 16; i++) { 968 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 969 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 970 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 971 } 972 973 mutex_unlock(&dev->struct_mutex); 974 975 return 0; 976 } 977 978 static inline int MAP_TO_MV(int map) 979 { 980 return 1250 - (map * 25); 981 } 982 983 static int i915_inttoext_table(struct seq_file *m, void *unused) 984 { 985 struct drm_info_node *node = (struct drm_info_node *) m->private; 986 struct drm_device *dev = node->minor->dev; 987 drm_i915_private_t *dev_priv = dev->dev_private; 988 u32 inttoext; 989 int ret, i; 990 991 ret = mutex_lock_interruptible(&dev->struct_mutex); 992 if (ret) 993 return ret; 994 995 for (i = 1; i <= 32; i++) { 996 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 997 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 998 } 999 1000 mutex_unlock(&dev->struct_mutex); 1001 1002 return 0; 1003 } 1004 1005 static int ironlake_drpc_info(struct seq_file *m) 1006 { 1007 struct drm_info_node *node = (struct drm_info_node *) m->private; 1008 struct drm_device *dev = node->minor->dev; 1009 drm_i915_private_t *dev_priv = dev->dev_private; 1010 u32 rgvmodectl, rstdbyctl; 1011 u16 crstandvid; 1012 int ret; 1013 1014 ret = mutex_lock_interruptible(&dev->struct_mutex); 1015 if (ret) 1016 return ret; 1017 1018 rgvmodectl = I915_READ(MEMMODECTL); 1019 rstdbyctl = I915_READ(RSTDBYCTL); 1020 crstandvid = I915_READ16(CRSTANDVID); 1021 1022 mutex_unlock(&dev->struct_mutex); 1023 1024 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1025 "yes" : "no"); 1026 seq_printf(m, "Boost freq: %d\n", 1027 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1028 MEMMODE_BOOST_FREQ_SHIFT); 1029 seq_printf(m, "HW control enabled: %s\n", 1030 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 1031 seq_printf(m, "SW control enabled: %s\n", 1032 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 1033 seq_printf(m, "Gated voltage change: %s\n", 1034 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 1035 seq_printf(m, "Starting frequency: P%d\n", 1036 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1037 seq_printf(m, "Max P-state: P%d\n", 1038 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1039 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1040 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1041 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1042 seq_printf(m, "Render standby enabled: %s\n", 1043 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1044 seq_printf(m, "Current RS state: "); 1045 switch (rstdbyctl & RSX_STATUS_MASK) { 1046 case RSX_STATUS_ON: 1047 seq_printf(m, "on\n"); 1048 break; 1049 case RSX_STATUS_RC1: 1050 seq_printf(m, "RC1\n"); 1051 break; 1052 case RSX_STATUS_RC1E: 1053 seq_printf(m, "RC1E\n"); 1054 break; 1055 case RSX_STATUS_RS1: 1056 seq_printf(m, "RS1\n"); 1057 break; 1058 case RSX_STATUS_RS2: 1059 seq_printf(m, "RS2 (RC6)\n"); 1060 break; 1061 case RSX_STATUS_RS3: 1062 seq_printf(m, "RC3 (RC6+)\n"); 1063 break; 1064 default: 1065 seq_printf(m, "unknown\n"); 1066 break; 1067 } 1068 1069 return 0; 1070 } 1071 1072 static int gen6_drpc_info(struct seq_file *m) 1073 { 1074 1075 struct drm_info_node *node = (struct drm_info_node *) m->private; 1076 struct drm_device *dev = node->minor->dev; 1077 struct drm_i915_private *dev_priv = dev->dev_private; 1078 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1079 unsigned forcewake_count; 1080 int count=0, ret; 1081 1082 1083 ret = mutex_lock_interruptible(&dev->struct_mutex); 1084 if (ret) 1085 return ret; 1086 1087 spin_lock_irq(&dev_priv->gt_lock); 1088 forcewake_count = dev_priv->forcewake_count; 1089 spin_unlock_irq(&dev_priv->gt_lock); 1090 1091 if (forcewake_count) { 1092 seq_printf(m, "RC information inaccurate because somebody " 1093 "holds a forcewake reference \n"); 1094 } else { 1095 /* NB: we cannot use forcewake, else we read the wrong values */ 1096 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1097 udelay(10); 1098 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1099 } 1100 1101 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1102 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); 1103 1104 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1105 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1106 mutex_unlock(&dev->struct_mutex); 1107 mutex_lock(&dev_priv->rps.hw_lock); 1108 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1109 mutex_unlock(&dev_priv->rps.hw_lock); 1110 1111 seq_printf(m, "Video Turbo Mode: %s\n", 1112 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1113 seq_printf(m, "HW control enabled: %s\n", 1114 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1115 seq_printf(m, "SW control enabled: %s\n", 1116 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1117 GEN6_RP_MEDIA_SW_MODE)); 1118 seq_printf(m, "RC1e Enabled: %s\n", 1119 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1120 seq_printf(m, "RC6 Enabled: %s\n", 1121 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1122 seq_printf(m, "Deep RC6 Enabled: %s\n", 1123 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1124 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1125 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1126 seq_printf(m, "Current RC state: "); 1127 switch (gt_core_status & GEN6_RCn_MASK) { 1128 case GEN6_RC0: 1129 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1130 seq_printf(m, "Core Power Down\n"); 1131 else 1132 seq_printf(m, "on\n"); 1133 break; 1134 case GEN6_RC3: 1135 seq_printf(m, "RC3\n"); 1136 break; 1137 case GEN6_RC6: 1138 seq_printf(m, "RC6\n"); 1139 break; 1140 case GEN6_RC7: 1141 seq_printf(m, "RC7\n"); 1142 break; 1143 default: 1144 seq_printf(m, "Unknown\n"); 1145 break; 1146 } 1147 1148 seq_printf(m, "Core Power Down: %s\n", 1149 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1150 1151 /* Not exactly sure what this is */ 1152 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1153 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1154 seq_printf(m, "RC6 residency since boot: %u\n", 1155 I915_READ(GEN6_GT_GFX_RC6)); 1156 seq_printf(m, "RC6+ residency since boot: %u\n", 1157 I915_READ(GEN6_GT_GFX_RC6p)); 1158 seq_printf(m, "RC6++ residency since boot: %u\n", 1159 I915_READ(GEN6_GT_GFX_RC6pp)); 1160 1161 seq_printf(m, "RC6 voltage: %dmV\n", 1162 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1163 seq_printf(m, "RC6+ voltage: %dmV\n", 1164 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1165 seq_printf(m, "RC6++ voltage: %dmV\n", 1166 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1167 return 0; 1168 } 1169 1170 static int i915_drpc_info(struct seq_file *m, void *unused) 1171 { 1172 struct drm_info_node *node = (struct drm_info_node *) m->private; 1173 struct drm_device *dev = node->minor->dev; 1174 1175 if (IS_GEN6(dev) || IS_GEN7(dev)) 1176 return gen6_drpc_info(m); 1177 else 1178 return ironlake_drpc_info(m); 1179 } 1180 1181 static int i915_fbc_status(struct seq_file *m, void *unused) 1182 { 1183 struct drm_info_node *node = (struct drm_info_node *) m->private; 1184 struct drm_device *dev = node->minor->dev; 1185 drm_i915_private_t *dev_priv = dev->dev_private; 1186 1187 if (!I915_HAS_FBC(dev)) { 1188 seq_printf(m, "FBC unsupported on this chipset\n"); 1189 return 0; 1190 } 1191 1192 if (intel_fbc_enabled(dev)) { 1193 seq_printf(m, "FBC enabled\n"); 1194 } else { 1195 seq_printf(m, "FBC disabled: "); 1196 switch (dev_priv->no_fbc_reason) { 1197 case FBC_NO_OUTPUT: 1198 seq_printf(m, "no outputs"); 1199 break; 1200 case FBC_STOLEN_TOO_SMALL: 1201 seq_printf(m, "not enough stolen memory"); 1202 break; 1203 case FBC_UNSUPPORTED_MODE: 1204 seq_printf(m, "mode not supported"); 1205 break; 1206 case FBC_MODE_TOO_LARGE: 1207 seq_printf(m, "mode too large"); 1208 break; 1209 case FBC_BAD_PLANE: 1210 seq_printf(m, "FBC unsupported on plane"); 1211 break; 1212 case FBC_NOT_TILED: 1213 seq_printf(m, "scanout buffer not tiled"); 1214 break; 1215 case FBC_MULTIPLE_PIPES: 1216 seq_printf(m, "multiple pipes are enabled"); 1217 break; 1218 case FBC_MODULE_PARAM: 1219 seq_printf(m, "disabled per module param (default off)"); 1220 break; 1221 default: 1222 seq_printf(m, "unknown reason"); 1223 } 1224 seq_printf(m, "\n"); 1225 } 1226 return 0; 1227 } 1228 1229 static int i915_sr_status(struct seq_file *m, void *unused) 1230 { 1231 struct drm_info_node *node = (struct drm_info_node *) m->private; 1232 struct drm_device *dev = node->minor->dev; 1233 drm_i915_private_t *dev_priv = dev->dev_private; 1234 bool sr_enabled = false; 1235 1236 if (HAS_PCH_SPLIT(dev)) 1237 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1238 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1239 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1240 else if (IS_I915GM(dev)) 1241 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1242 else if (IS_PINEVIEW(dev)) 1243 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1244 1245 seq_printf(m, "self-refresh: %s\n", 1246 sr_enabled ? "enabled" : "disabled"); 1247 1248 return 0; 1249 } 1250 1251 static int i915_emon_status(struct seq_file *m, void *unused) 1252 { 1253 struct drm_info_node *node = (struct drm_info_node *) m->private; 1254 struct drm_device *dev = node->minor->dev; 1255 drm_i915_private_t *dev_priv = dev->dev_private; 1256 unsigned long temp, chipset, gfx; 1257 int ret; 1258 1259 if (!IS_GEN5(dev)) 1260 return -ENODEV; 1261 1262 ret = mutex_lock_interruptible(&dev->struct_mutex); 1263 if (ret) 1264 return ret; 1265 1266 temp = i915_mch_val(dev_priv); 1267 chipset = i915_chipset_val(dev_priv); 1268 gfx = i915_gfx_val(dev_priv); 1269 mutex_unlock(&dev->struct_mutex); 1270 1271 seq_printf(m, "GMCH temp: %ld\n", temp); 1272 seq_printf(m, "Chipset power: %ld\n", chipset); 1273 seq_printf(m, "GFX power: %ld\n", gfx); 1274 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1275 1276 return 0; 1277 } 1278 1279 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1280 { 1281 struct drm_info_node *node = (struct drm_info_node *) m->private; 1282 struct drm_device *dev = node->minor->dev; 1283 drm_i915_private_t *dev_priv = dev->dev_private; 1284 int ret; 1285 int gpu_freq, ia_freq; 1286 1287 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1288 seq_printf(m, "unsupported on this chipset\n"); 1289 return 0; 1290 } 1291 1292 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1293 if (ret) 1294 return ret; 1295 1296 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1297 1298 for (gpu_freq = dev_priv->rps.min_delay; 1299 gpu_freq <= dev_priv->rps.max_delay; 1300 gpu_freq++) { 1301 ia_freq = gpu_freq; 1302 sandybridge_pcode_read(dev_priv, 1303 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1304 &ia_freq); 1305 seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); 1306 } 1307 1308 mutex_unlock(&dev_priv->rps.hw_lock); 1309 1310 return 0; 1311 } 1312 1313 static int i915_gfxec(struct seq_file *m, void *unused) 1314 { 1315 struct drm_info_node *node = (struct drm_info_node *) m->private; 1316 struct drm_device *dev = node->minor->dev; 1317 drm_i915_private_t *dev_priv = dev->dev_private; 1318 int ret; 1319 1320 ret = mutex_lock_interruptible(&dev->struct_mutex); 1321 if (ret) 1322 return ret; 1323 1324 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1325 1326 mutex_unlock(&dev->struct_mutex); 1327 1328 return 0; 1329 } 1330 1331 static int i915_opregion(struct seq_file *m, void *unused) 1332 { 1333 struct drm_info_node *node = (struct drm_info_node *) m->private; 1334 struct drm_device *dev = node->minor->dev; 1335 drm_i915_private_t *dev_priv = dev->dev_private; 1336 struct intel_opregion *opregion = &dev_priv->opregion; 1337 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); 1338 int ret; 1339 1340 if (data == NULL) 1341 return -ENOMEM; 1342 1343 ret = mutex_lock_interruptible(&dev->struct_mutex); 1344 if (ret) 1345 goto out; 1346 1347 if (opregion->header) { 1348 memcpy_fromio(data, opregion->header, OPREGION_SIZE); 1349 seq_write(m, data, OPREGION_SIZE); 1350 } 1351 1352 mutex_unlock(&dev->struct_mutex); 1353 1354 out: 1355 kfree(data); 1356 return 0; 1357 } 1358 1359 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1360 { 1361 struct drm_info_node *node = (struct drm_info_node *) m->private; 1362 struct drm_device *dev = node->minor->dev; 1363 drm_i915_private_t *dev_priv = dev->dev_private; 1364 struct intel_fbdev *ifbdev; 1365 struct intel_framebuffer *fb; 1366 int ret; 1367 1368 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1369 if (ret) 1370 return ret; 1371 1372 ifbdev = dev_priv->fbdev; 1373 fb = to_intel_framebuffer(ifbdev->helper.fb); 1374 1375 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", 1376 fb->base.width, 1377 fb->base.height, 1378 fb->base.depth, 1379 fb->base.bits_per_pixel); 1380 describe_obj(m, fb->obj); 1381 seq_printf(m, "\n"); 1382 1383 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1384 if (&fb->base == ifbdev->helper.fb) 1385 continue; 1386 1387 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", 1388 fb->base.width, 1389 fb->base.height, 1390 fb->base.depth, 1391 fb->base.bits_per_pixel); 1392 describe_obj(m, fb->obj); 1393 seq_printf(m, "\n"); 1394 } 1395 1396 mutex_unlock(&dev->mode_config.mutex); 1397 1398 return 0; 1399 } 1400 1401 static int i915_context_status(struct seq_file *m, void *unused) 1402 { 1403 struct drm_info_node *node = (struct drm_info_node *) m->private; 1404 struct drm_device *dev = node->minor->dev; 1405 drm_i915_private_t *dev_priv = dev->dev_private; 1406 int ret; 1407 1408 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1409 if (ret) 1410 return ret; 1411 1412 if (dev_priv->ips.pwrctx) { 1413 seq_printf(m, "power context "); 1414 describe_obj(m, dev_priv->ips.pwrctx); 1415 seq_printf(m, "\n"); 1416 } 1417 1418 if (dev_priv->ips.renderctx) { 1419 seq_printf(m, "render context "); 1420 describe_obj(m, dev_priv->ips.renderctx); 1421 seq_printf(m, "\n"); 1422 } 1423 1424 mutex_unlock(&dev->mode_config.mutex); 1425 1426 return 0; 1427 } 1428 1429 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) 1430 { 1431 struct drm_info_node *node = (struct drm_info_node *) m->private; 1432 struct drm_device *dev = node->minor->dev; 1433 struct drm_i915_private *dev_priv = dev->dev_private; 1434 unsigned forcewake_count; 1435 1436 spin_lock_irq(&dev_priv->gt_lock); 1437 forcewake_count = dev_priv->forcewake_count; 1438 spin_unlock_irq(&dev_priv->gt_lock); 1439 1440 seq_printf(m, "forcewake count = %u\n", forcewake_count); 1441 1442 return 0; 1443 } 1444 1445 static const char *swizzle_string(unsigned swizzle) 1446 { 1447 switch(swizzle) { 1448 case I915_BIT_6_SWIZZLE_NONE: 1449 return "none"; 1450 case I915_BIT_6_SWIZZLE_9: 1451 return "bit9"; 1452 case I915_BIT_6_SWIZZLE_9_10: 1453 return "bit9/bit10"; 1454 case I915_BIT_6_SWIZZLE_9_11: 1455 return "bit9/bit11"; 1456 case I915_BIT_6_SWIZZLE_9_10_11: 1457 return "bit9/bit10/bit11"; 1458 case I915_BIT_6_SWIZZLE_9_17: 1459 return "bit9/bit17"; 1460 case I915_BIT_6_SWIZZLE_9_10_17: 1461 return "bit9/bit10/bit17"; 1462 case I915_BIT_6_SWIZZLE_UNKNOWN: 1463 return "unkown"; 1464 } 1465 1466 return "bug"; 1467 } 1468 1469 static int i915_swizzle_info(struct seq_file *m, void *data) 1470 { 1471 struct drm_info_node *node = (struct drm_info_node *) m->private; 1472 struct drm_device *dev = node->minor->dev; 1473 struct drm_i915_private *dev_priv = dev->dev_private; 1474 int ret; 1475 1476 ret = mutex_lock_interruptible(&dev->struct_mutex); 1477 if (ret) 1478 return ret; 1479 1480 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1481 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1482 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1483 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1484 1485 if (IS_GEN3(dev) || IS_GEN4(dev)) { 1486 seq_printf(m, "DDC = 0x%08x\n", 1487 I915_READ(DCC)); 1488 seq_printf(m, "C0DRB3 = 0x%04x\n", 1489 I915_READ16(C0DRB3)); 1490 seq_printf(m, "C1DRB3 = 0x%04x\n", 1491 I915_READ16(C1DRB3)); 1492 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 1493 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1494 I915_READ(MAD_DIMM_C0)); 1495 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1496 I915_READ(MAD_DIMM_C1)); 1497 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1498 I915_READ(MAD_DIMM_C2)); 1499 seq_printf(m, "TILECTL = 0x%08x\n", 1500 I915_READ(TILECTL)); 1501 seq_printf(m, "ARB_MODE = 0x%08x\n", 1502 I915_READ(ARB_MODE)); 1503 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1504 I915_READ(DISP_ARB_CTL)); 1505 } 1506 mutex_unlock(&dev->struct_mutex); 1507 1508 return 0; 1509 } 1510 1511 static int i915_ppgtt_info(struct seq_file *m, void *data) 1512 { 1513 struct drm_info_node *node = (struct drm_info_node *) m->private; 1514 struct drm_device *dev = node->minor->dev; 1515 struct drm_i915_private *dev_priv = dev->dev_private; 1516 struct intel_ring_buffer *ring; 1517 int i, ret; 1518 1519 1520 ret = mutex_lock_interruptible(&dev->struct_mutex); 1521 if (ret) 1522 return ret; 1523 if (INTEL_INFO(dev)->gen == 6) 1524 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 1525 1526 for_each_ring(ring, dev_priv, i) { 1527 seq_printf(m, "%s\n", ring->name); 1528 if (INTEL_INFO(dev)->gen == 7) 1529 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 1530 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 1531 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 1532 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 1533 } 1534 if (dev_priv->mm.aliasing_ppgtt) { 1535 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1536 1537 seq_printf(m, "aliasing PPGTT:\n"); 1538 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1539 } 1540 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1541 mutex_unlock(&dev->struct_mutex); 1542 1543 return 0; 1544 } 1545 1546 static int i915_dpio_info(struct seq_file *m, void *data) 1547 { 1548 struct drm_info_node *node = (struct drm_info_node *) m->private; 1549 struct drm_device *dev = node->minor->dev; 1550 struct drm_i915_private *dev_priv = dev->dev_private; 1551 int ret; 1552 1553 1554 if (!IS_VALLEYVIEW(dev)) { 1555 seq_printf(m, "unsupported\n"); 1556 return 0; 1557 } 1558 1559 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1560 if (ret) 1561 return ret; 1562 1563 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); 1564 1565 seq_printf(m, "DPIO_DIV_A: 0x%08x\n", 1566 intel_dpio_read(dev_priv, _DPIO_DIV_A)); 1567 seq_printf(m, "DPIO_DIV_B: 0x%08x\n", 1568 intel_dpio_read(dev_priv, _DPIO_DIV_B)); 1569 1570 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", 1571 intel_dpio_read(dev_priv, _DPIO_REFSFR_A)); 1572 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", 1573 intel_dpio_read(dev_priv, _DPIO_REFSFR_B)); 1574 1575 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", 1576 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); 1577 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1578 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); 1579 1580 seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n", 1581 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A)); 1582 seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n", 1583 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B)); 1584 1585 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1586 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1587 1588 mutex_unlock(&dev->mode_config.mutex); 1589 1590 return 0; 1591 } 1592 1593 static ssize_t 1594 i915_wedged_read(struct file *filp, 1595 char __user *ubuf, 1596 size_t max, 1597 loff_t *ppos) 1598 { 1599 struct drm_device *dev = filp->private_data; 1600 drm_i915_private_t *dev_priv = dev->dev_private; 1601 char buf[80]; 1602 int len; 1603 1604 len = snprintf(buf, sizeof(buf), 1605 "wedged : %d\n", 1606 atomic_read(&dev_priv->mm.wedged)); 1607 1608 if (len > sizeof(buf)) 1609 len = sizeof(buf); 1610 1611 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1612 } 1613 1614 static ssize_t 1615 i915_wedged_write(struct file *filp, 1616 const char __user *ubuf, 1617 size_t cnt, 1618 loff_t *ppos) 1619 { 1620 struct drm_device *dev = filp->private_data; 1621 char buf[20]; 1622 int val = 1; 1623 1624 if (cnt > 0) { 1625 if (cnt > sizeof(buf) - 1) 1626 return -EINVAL; 1627 1628 if (copy_from_user(buf, ubuf, cnt)) 1629 return -EFAULT; 1630 buf[cnt] = 0; 1631 1632 val = simple_strtoul(buf, NULL, 0); 1633 } 1634 1635 DRM_INFO("Manually setting wedged to %d\n", val); 1636 i915_handle_error(dev, val); 1637 1638 return cnt; 1639 } 1640 1641 static const struct file_operations i915_wedged_fops = { 1642 .owner = THIS_MODULE, 1643 .open = simple_open, 1644 .read = i915_wedged_read, 1645 .write = i915_wedged_write, 1646 .llseek = default_llseek, 1647 }; 1648 1649 static ssize_t 1650 i915_ring_stop_read(struct file *filp, 1651 char __user *ubuf, 1652 size_t max, 1653 loff_t *ppos) 1654 { 1655 struct drm_device *dev = filp->private_data; 1656 drm_i915_private_t *dev_priv = dev->dev_private; 1657 char buf[20]; 1658 int len; 1659 1660 len = snprintf(buf, sizeof(buf), 1661 "0x%08x\n", dev_priv->stop_rings); 1662 1663 if (len > sizeof(buf)) 1664 len = sizeof(buf); 1665 1666 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1667 } 1668 1669 static ssize_t 1670 i915_ring_stop_write(struct file *filp, 1671 const char __user *ubuf, 1672 size_t cnt, 1673 loff_t *ppos) 1674 { 1675 struct drm_device *dev = filp->private_data; 1676 struct drm_i915_private *dev_priv = dev->dev_private; 1677 char buf[20]; 1678 int val = 0, ret; 1679 1680 if (cnt > 0) { 1681 if (cnt > sizeof(buf) - 1) 1682 return -EINVAL; 1683 1684 if (copy_from_user(buf, ubuf, cnt)) 1685 return -EFAULT; 1686 buf[cnt] = 0; 1687 1688 val = simple_strtoul(buf, NULL, 0); 1689 } 1690 1691 DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val); 1692 1693 ret = mutex_lock_interruptible(&dev->struct_mutex); 1694 if (ret) 1695 return ret; 1696 1697 dev_priv->stop_rings = val; 1698 mutex_unlock(&dev->struct_mutex); 1699 1700 return cnt; 1701 } 1702 1703 static const struct file_operations i915_ring_stop_fops = { 1704 .owner = THIS_MODULE, 1705 .open = simple_open, 1706 .read = i915_ring_stop_read, 1707 .write = i915_ring_stop_write, 1708 .llseek = default_llseek, 1709 }; 1710 1711 static ssize_t 1712 i915_max_freq_read(struct file *filp, 1713 char __user *ubuf, 1714 size_t max, 1715 loff_t *ppos) 1716 { 1717 struct drm_device *dev = filp->private_data; 1718 drm_i915_private_t *dev_priv = dev->dev_private; 1719 char buf[80]; 1720 int len, ret; 1721 1722 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1723 return -ENODEV; 1724 1725 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1726 if (ret) 1727 return ret; 1728 1729 len = snprintf(buf, sizeof(buf), 1730 "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER); 1731 mutex_unlock(&dev_priv->rps.hw_lock); 1732 1733 if (len > sizeof(buf)) 1734 len = sizeof(buf); 1735 1736 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1737 } 1738 1739 static ssize_t 1740 i915_max_freq_write(struct file *filp, 1741 const char __user *ubuf, 1742 size_t cnt, 1743 loff_t *ppos) 1744 { 1745 struct drm_device *dev = filp->private_data; 1746 struct drm_i915_private *dev_priv = dev->dev_private; 1747 char buf[20]; 1748 int val = 1, ret; 1749 1750 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1751 return -ENODEV; 1752 1753 if (cnt > 0) { 1754 if (cnt > sizeof(buf) - 1) 1755 return -EINVAL; 1756 1757 if (copy_from_user(buf, ubuf, cnt)) 1758 return -EFAULT; 1759 buf[cnt] = 0; 1760 1761 val = simple_strtoul(buf, NULL, 0); 1762 } 1763 1764 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); 1765 1766 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1767 if (ret) 1768 return ret; 1769 1770 /* 1771 * Turbo will still be enabled, but won't go above the set value. 1772 */ 1773 dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; 1774 1775 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 1776 mutex_unlock(&dev_priv->rps.hw_lock); 1777 1778 return cnt; 1779 } 1780 1781 static const struct file_operations i915_max_freq_fops = { 1782 .owner = THIS_MODULE, 1783 .open = simple_open, 1784 .read = i915_max_freq_read, 1785 .write = i915_max_freq_write, 1786 .llseek = default_llseek, 1787 }; 1788 1789 static ssize_t 1790 i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, 1791 loff_t *ppos) 1792 { 1793 struct drm_device *dev = filp->private_data; 1794 drm_i915_private_t *dev_priv = dev->dev_private; 1795 char buf[80]; 1796 int len, ret; 1797 1798 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1799 return -ENODEV; 1800 1801 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1802 if (ret) 1803 return ret; 1804 1805 len = snprintf(buf, sizeof(buf), 1806 "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER); 1807 mutex_unlock(&dev_priv->rps.hw_lock); 1808 1809 if (len > sizeof(buf)) 1810 len = sizeof(buf); 1811 1812 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1813 } 1814 1815 static ssize_t 1816 i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, 1817 loff_t *ppos) 1818 { 1819 struct drm_device *dev = filp->private_data; 1820 struct drm_i915_private *dev_priv = dev->dev_private; 1821 char buf[20]; 1822 int val = 1, ret; 1823 1824 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1825 return -ENODEV; 1826 1827 if (cnt > 0) { 1828 if (cnt > sizeof(buf) - 1) 1829 return -EINVAL; 1830 1831 if (copy_from_user(buf, ubuf, cnt)) 1832 return -EFAULT; 1833 buf[cnt] = 0; 1834 1835 val = simple_strtoul(buf, NULL, 0); 1836 } 1837 1838 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); 1839 1840 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1841 if (ret) 1842 return ret; 1843 1844 /* 1845 * Turbo will still be enabled, but won't go below the set value. 1846 */ 1847 dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER; 1848 1849 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 1850 mutex_unlock(&dev_priv->rps.hw_lock); 1851 1852 return cnt; 1853 } 1854 1855 static const struct file_operations i915_min_freq_fops = { 1856 .owner = THIS_MODULE, 1857 .open = simple_open, 1858 .read = i915_min_freq_read, 1859 .write = i915_min_freq_write, 1860 .llseek = default_llseek, 1861 }; 1862 1863 static ssize_t 1864 i915_cache_sharing_read(struct file *filp, 1865 char __user *ubuf, 1866 size_t max, 1867 loff_t *ppos) 1868 { 1869 struct drm_device *dev = filp->private_data; 1870 drm_i915_private_t *dev_priv = dev->dev_private; 1871 char buf[80]; 1872 u32 snpcr; 1873 int len, ret; 1874 1875 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1876 return -ENODEV; 1877 1878 ret = mutex_lock_interruptible(&dev->struct_mutex); 1879 if (ret) 1880 return ret; 1881 1882 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1883 mutex_unlock(&dev_priv->dev->struct_mutex); 1884 1885 len = snprintf(buf, sizeof(buf), 1886 "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >> 1887 GEN6_MBC_SNPCR_SHIFT); 1888 1889 if (len > sizeof(buf)) 1890 len = sizeof(buf); 1891 1892 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1893 } 1894 1895 static ssize_t 1896 i915_cache_sharing_write(struct file *filp, 1897 const char __user *ubuf, 1898 size_t cnt, 1899 loff_t *ppos) 1900 { 1901 struct drm_device *dev = filp->private_data; 1902 struct drm_i915_private *dev_priv = dev->dev_private; 1903 char buf[20]; 1904 u32 snpcr; 1905 int val = 1; 1906 1907 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1908 return -ENODEV; 1909 1910 if (cnt > 0) { 1911 if (cnt > sizeof(buf) - 1) 1912 return -EINVAL; 1913 1914 if (copy_from_user(buf, ubuf, cnt)) 1915 return -EFAULT; 1916 buf[cnt] = 0; 1917 1918 val = simple_strtoul(buf, NULL, 0); 1919 } 1920 1921 if (val < 0 || val > 3) 1922 return -EINVAL; 1923 1924 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val); 1925 1926 /* Update the cache sharing policy here as well */ 1927 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1928 snpcr &= ~GEN6_MBC_SNPCR_MASK; 1929 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 1930 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1931 1932 return cnt; 1933 } 1934 1935 static const struct file_operations i915_cache_sharing_fops = { 1936 .owner = THIS_MODULE, 1937 .open = simple_open, 1938 .read = i915_cache_sharing_read, 1939 .write = i915_cache_sharing_write, 1940 .llseek = default_llseek, 1941 }; 1942 1943 /* As the drm_debugfs_init() routines are called before dev->dev_private is 1944 * allocated we need to hook into the minor for release. */ 1945 static int 1946 drm_add_fake_info_node(struct drm_minor *minor, 1947 struct dentry *ent, 1948 const void *key) 1949 { 1950 struct drm_info_node *node; 1951 1952 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 1953 if (node == NULL) { 1954 debugfs_remove(ent); 1955 return -ENOMEM; 1956 } 1957 1958 node->minor = minor; 1959 node->dent = ent; 1960 node->info_ent = (void *) key; 1961 1962 mutex_lock(&minor->debugfs_lock); 1963 list_add(&node->list, &minor->debugfs_list); 1964 mutex_unlock(&minor->debugfs_lock); 1965 1966 return 0; 1967 } 1968 1969 static int i915_forcewake_open(struct inode *inode, struct file *file) 1970 { 1971 struct drm_device *dev = inode->i_private; 1972 struct drm_i915_private *dev_priv = dev->dev_private; 1973 1974 if (INTEL_INFO(dev)->gen < 6) 1975 return 0; 1976 1977 gen6_gt_force_wake_get(dev_priv); 1978 1979 return 0; 1980 } 1981 1982 static int i915_forcewake_release(struct inode *inode, struct file *file) 1983 { 1984 struct drm_device *dev = inode->i_private; 1985 struct drm_i915_private *dev_priv = dev->dev_private; 1986 1987 if (INTEL_INFO(dev)->gen < 6) 1988 return 0; 1989 1990 gen6_gt_force_wake_put(dev_priv); 1991 1992 return 0; 1993 } 1994 1995 static const struct file_operations i915_forcewake_fops = { 1996 .owner = THIS_MODULE, 1997 .open = i915_forcewake_open, 1998 .release = i915_forcewake_release, 1999 }; 2000 2001 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 2002 { 2003 struct drm_device *dev = minor->dev; 2004 struct dentry *ent; 2005 2006 ent = debugfs_create_file("i915_forcewake_user", 2007 S_IRUSR, 2008 root, dev, 2009 &i915_forcewake_fops); 2010 if (IS_ERR(ent)) 2011 return PTR_ERR(ent); 2012 2013 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 2014 } 2015 2016 static int i915_debugfs_create(struct dentry *root, 2017 struct drm_minor *minor, 2018 const char *name, 2019 const struct file_operations *fops) 2020 { 2021 struct drm_device *dev = minor->dev; 2022 struct dentry *ent; 2023 2024 ent = debugfs_create_file(name, 2025 S_IRUGO | S_IWUSR, 2026 root, dev, 2027 fops); 2028 if (IS_ERR(ent)) 2029 return PTR_ERR(ent); 2030 2031 return drm_add_fake_info_node(minor, ent, fops); 2032 } 2033 2034 static struct drm_info_list i915_debugfs_list[] = { 2035 {"i915_capabilities", i915_capabilities, 0}, 2036 {"i915_gem_objects", i915_gem_object_info, 0}, 2037 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 2038 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 2039 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 2040 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 2041 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 2042 {"i915_gem_request", i915_gem_request_info, 0}, 2043 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 2044 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 2045 {"i915_gem_interrupt", i915_interrupt_info, 0}, 2046 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 2047 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 2048 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 2049 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 2050 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 2051 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 2052 {"i915_inttoext_table", i915_inttoext_table, 0}, 2053 {"i915_drpc_info", i915_drpc_info, 0}, 2054 {"i915_emon_status", i915_emon_status, 0}, 2055 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 2056 {"i915_gfxec", i915_gfxec, 0}, 2057 {"i915_fbc_status", i915_fbc_status, 0}, 2058 {"i915_sr_status", i915_sr_status, 0}, 2059 {"i915_opregion", i915_opregion, 0}, 2060 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 2061 {"i915_context_status", i915_context_status, 0}, 2062 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 2063 {"i915_swizzle_info", i915_swizzle_info, 0}, 2064 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 2065 {"i915_dpio", i915_dpio_info, 0}, 2066 }; 2067 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 2068 2069 int i915_debugfs_init(struct drm_minor *minor) 2070 { 2071 int ret; 2072 2073 ret = i915_debugfs_create(minor->debugfs_root, minor, 2074 "i915_wedged", 2075 &i915_wedged_fops); 2076 if (ret) 2077 return ret; 2078 2079 ret = i915_forcewake_create(minor->debugfs_root, minor); 2080 if (ret) 2081 return ret; 2082 2083 ret = i915_debugfs_create(minor->debugfs_root, minor, 2084 "i915_max_freq", 2085 &i915_max_freq_fops); 2086 if (ret) 2087 return ret; 2088 2089 ret = i915_debugfs_create(minor->debugfs_root, minor, 2090 "i915_min_freq", 2091 &i915_min_freq_fops); 2092 if (ret) 2093 return ret; 2094 2095 ret = i915_debugfs_create(minor->debugfs_root, minor, 2096 "i915_cache_sharing", 2097 &i915_cache_sharing_fops); 2098 if (ret) 2099 return ret; 2100 2101 ret = i915_debugfs_create(minor->debugfs_root, minor, 2102 "i915_ring_stop", 2103 &i915_ring_stop_fops); 2104 if (ret) 2105 return ret; 2106 2107 ret = i915_debugfs_create(minor->debugfs_root, minor, 2108 "i915_error_state", 2109 &i915_error_state_fops); 2110 if (ret) 2111 return ret; 2112 2113 return drm_debugfs_create_files(i915_debugfs_list, 2114 I915_DEBUGFS_ENTRIES, 2115 minor->debugfs_root, minor); 2116 } 2117 2118 void i915_debugfs_cleanup(struct drm_minor *minor) 2119 { 2120 drm_debugfs_remove_files(i915_debugfs_list, 2121 I915_DEBUGFS_ENTRIES, minor); 2122 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 2123 1, minor); 2124 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, 2125 1, minor); 2126 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, 2127 1, minor); 2128 drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops, 2129 1, minor); 2130 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, 2131 1, minor); 2132 drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops, 2133 1, minor); 2134 drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops, 2135 1, minor); 2136 } 2137 2138 #endif /* CONFIG_DEBUG_FS */ 2139