1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/debugfs.h> 31 #include <linux/slab.h> 32 #include <linux/export.h> 33 #include <generated/utsrelease.h> 34 #include <drm/drmP.h> 35 #include "intel_drv.h" 36 #include "intel_ringbuffer.h" 37 #include <drm/i915_drm.h> 38 #include "i915_drv.h" 39 40 #define DRM_I915_RING_DEBUG 1 41 42 43 #if defined(CONFIG_DEBUG_FS) 44 45 enum { 46 ACTIVE_LIST, 47 INACTIVE_LIST, 48 PINNED_LIST, 49 }; 50 51 static const char *yesno(int v) 52 { 53 return v ? "yes" : "no"; 54 } 55 56 static int i915_capabilities(struct seq_file *m, void *data) 57 { 58 struct drm_info_node *node = (struct drm_info_node *) m->private; 59 struct drm_device *dev = node->minor->dev; 60 const struct intel_device_info *info = INTEL_INFO(dev); 61 62 seq_printf(m, "gen: %d\n", info->gen); 63 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 64 #define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 65 #define DEV_INFO_SEP ; 66 DEV_INFO_FLAGS; 67 #undef DEV_INFO_FLAG 68 #undef DEV_INFO_SEP 69 70 return 0; 71 } 72 73 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 74 { 75 if (obj->user_pin_count > 0) 76 return "P"; 77 else if (obj->pin_count > 0) 78 return "p"; 79 else 80 return " "; 81 } 82 83 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 84 { 85 switch (obj->tiling_mode) { 86 default: 87 case I915_TILING_NONE: return " "; 88 case I915_TILING_X: return "X"; 89 case I915_TILING_Y: return "Y"; 90 } 91 } 92 93 static const char *cache_level_str(int type) 94 { 95 switch (type) { 96 case I915_CACHE_NONE: return " uncached"; 97 case I915_CACHE_LLC: return " snooped (LLC)"; 98 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 99 default: return ""; 100 } 101 } 102 103 static void 104 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 105 { 106 seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", 107 &obj->base, 108 get_pin_flag(obj), 109 get_tiling_flag(obj), 110 obj->base.size / 1024, 111 obj->base.read_domains, 112 obj->base.write_domain, 113 obj->last_read_seqno, 114 obj->last_write_seqno, 115 obj->last_fenced_seqno, 116 cache_level_str(obj->cache_level), 117 obj->dirty ? " dirty" : "", 118 obj->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); 119 if (obj->base.name) 120 seq_printf(m, " (name: %d)", obj->base.name); 121 if (obj->pin_count) 122 seq_printf(m, " (pinned x %d)", obj->pin_count); 123 if (obj->fence_reg != I915_FENCE_REG_NONE) 124 seq_printf(m, " (fence: %d)", obj->fence_reg); 125 if (obj->gtt_space != NULL) 126 seq_printf(m, " (gtt offset: %08x, size: %08x)", 127 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 128 if (obj->stolen) 129 seq_printf(m, " (stolen: %08lx)", obj->stolen->start); 130 if (obj->pin_mappable || obj->fault_mappable) { 131 char s[3], *t = s; 132 if (obj->pin_mappable) 133 *t++ = 'p'; 134 if (obj->fault_mappable) 135 *t++ = 'f'; 136 *t = '\0'; 137 seq_printf(m, " (%s mappable)", s); 138 } 139 if (obj->ring != NULL) 140 seq_printf(m, " (%s)", obj->ring->name); 141 } 142 143 static int i915_gem_object_list_info(struct seq_file *m, void *data) 144 { 145 struct drm_info_node *node = (struct drm_info_node *) m->private; 146 uintptr_t list = (uintptr_t) node->info_ent->data; 147 struct list_head *head; 148 struct drm_device *dev = node->minor->dev; 149 drm_i915_private_t *dev_priv = dev->dev_private; 150 struct drm_i915_gem_object *obj; 151 size_t total_obj_size, total_gtt_size; 152 int count, ret; 153 154 ret = mutex_lock_interruptible(&dev->struct_mutex); 155 if (ret) 156 return ret; 157 158 switch (list) { 159 case ACTIVE_LIST: 160 seq_printf(m, "Active:\n"); 161 head = &dev_priv->mm.active_list; 162 break; 163 case INACTIVE_LIST: 164 seq_printf(m, "Inactive:\n"); 165 head = &dev_priv->mm.inactive_list; 166 break; 167 default: 168 mutex_unlock(&dev->struct_mutex); 169 return -EINVAL; 170 } 171 172 total_obj_size = total_gtt_size = count = 0; 173 list_for_each_entry(obj, head, mm_list) { 174 seq_printf(m, " "); 175 describe_obj(m, obj); 176 seq_printf(m, "\n"); 177 total_obj_size += obj->base.size; 178 total_gtt_size += obj->gtt_space->size; 179 count++; 180 } 181 mutex_unlock(&dev->struct_mutex); 182 183 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 184 count, total_obj_size, total_gtt_size); 185 return 0; 186 } 187 188 #define count_objects(list, member) do { \ 189 list_for_each_entry(obj, list, member) { \ 190 size += obj->gtt_space->size; \ 191 ++count; \ 192 if (obj->map_and_fenceable) { \ 193 mappable_size += obj->gtt_space->size; \ 194 ++mappable_count; \ 195 } \ 196 } \ 197 } while (0) 198 199 static int i915_gem_object_info(struct seq_file *m, void* data) 200 { 201 struct drm_info_node *node = (struct drm_info_node *) m->private; 202 struct drm_device *dev = node->minor->dev; 203 struct drm_i915_private *dev_priv = dev->dev_private; 204 u32 count, mappable_count, purgeable_count; 205 size_t size, mappable_size, purgeable_size; 206 struct drm_i915_gem_object *obj; 207 int ret; 208 209 ret = mutex_lock_interruptible(&dev->struct_mutex); 210 if (ret) 211 return ret; 212 213 seq_printf(m, "%u objects, %zu bytes\n", 214 dev_priv->mm.object_count, 215 dev_priv->mm.object_memory); 216 217 size = count = mappable_size = mappable_count = 0; 218 count_objects(&dev_priv->mm.bound_list, gtt_list); 219 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 220 count, mappable_count, size, mappable_size); 221 222 size = count = mappable_size = mappable_count = 0; 223 count_objects(&dev_priv->mm.active_list, mm_list); 224 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 225 count, mappable_count, size, mappable_size); 226 227 size = count = mappable_size = mappable_count = 0; 228 count_objects(&dev_priv->mm.inactive_list, mm_list); 229 seq_printf(m, " 
%u [%u] inactive objects, %zu [%zu] bytes\n", 230 count, mappable_count, size, mappable_size); 231 232 size = count = purgeable_size = purgeable_count = 0; 233 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) { 234 size += obj->base.size, ++count; 235 if (obj->madv == I915_MADV_DONTNEED) 236 purgeable_size += obj->base.size, ++purgeable_count; 237 } 238 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); 239 240 size = count = mappable_size = mappable_count = 0; 241 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 242 if (obj->fault_mappable) { 243 size += obj->gtt_space->size; 244 ++count; 245 } 246 if (obj->pin_mappable) { 247 mappable_size += obj->gtt_space->size; 248 ++mappable_count; 249 } 250 if (obj->madv == I915_MADV_DONTNEED) { 251 purgeable_size += obj->base.size; 252 ++purgeable_count; 253 } 254 } 255 seq_printf(m, "%u purgeable objects, %zu bytes\n", 256 purgeable_count, purgeable_size); 257 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 258 mappable_count, mappable_size); 259 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 260 count, size); 261 262 seq_printf(m, "%zu [%lu] gtt total\n", 263 dev_priv->gtt.total, 264 dev_priv->gtt.mappable_end - dev_priv->gtt.start); 265 266 mutex_unlock(&dev->struct_mutex); 267 268 return 0; 269 } 270 271 static int i915_gem_gtt_info(struct seq_file *m, void* data) 272 { 273 struct drm_info_node *node = (struct drm_info_node *) m->private; 274 struct drm_device *dev = node->minor->dev; 275 uintptr_t list = (uintptr_t) node->info_ent->data; 276 struct drm_i915_private *dev_priv = dev->dev_private; 277 struct drm_i915_gem_object *obj; 278 size_t total_obj_size, total_gtt_size; 279 int count, ret; 280 281 ret = mutex_lock_interruptible(&dev->struct_mutex); 282 if (ret) 283 return ret; 284 285 total_obj_size = total_gtt_size = count = 0; 286 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 287 if (list == PINNED_LIST && obj->pin_count == 0) 288 continue; 289 290 seq_printf(m, " "); 291 describe_obj(m, obj); 292 seq_printf(m, "\n"); 293 total_obj_size += obj->base.size; 294 total_gtt_size += obj->gtt_space->size; 295 count++; 296 } 297 298 mutex_unlock(&dev->struct_mutex); 299 300 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 301 count, total_obj_size, total_gtt_size); 302 303 return 0; 304 } 305 306 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 307 { 308 struct drm_info_node *node = (struct drm_info_node *) m->private; 309 struct drm_device *dev = node->minor->dev; 310 unsigned long flags; 311 struct intel_crtc *crtc; 312 313 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 314 const char pipe = pipe_name(crtc->pipe); 315 const char plane = plane_name(crtc->plane); 316 struct intel_unpin_work *work; 317 318 spin_lock_irqsave(&dev->event_lock, flags); 319 work = crtc->unpin_work; 320 if (work == NULL) { 321 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 322 pipe, plane); 323 } else { 324 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 325 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 326 pipe, plane); 327 } else { 328 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 329 pipe, plane); 330 } 331 if (work->enable_stall_check) 332 seq_printf(m, "Stall check enabled, "); 333 else 334 seq_printf(m, "Stall check waiting for page flip ioctl, "); 335 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 336 337 if (work->old_fb_obj) { 338 struct drm_i915_gem_object *obj = 
work->old_fb_obj; 339 if (obj) 340 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 341 } 342 if (work->pending_flip_obj) { 343 struct drm_i915_gem_object *obj = work->pending_flip_obj; 344 if (obj) 345 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 346 } 347 } 348 spin_unlock_irqrestore(&dev->event_lock, flags); 349 } 350 351 return 0; 352 } 353 354 static int i915_gem_request_info(struct seq_file *m, void *data) 355 { 356 struct drm_info_node *node = (struct drm_info_node *) m->private; 357 struct drm_device *dev = node->minor->dev; 358 drm_i915_private_t *dev_priv = dev->dev_private; 359 struct intel_ring_buffer *ring; 360 struct drm_i915_gem_request *gem_request; 361 int ret, count, i; 362 363 ret = mutex_lock_interruptible(&dev->struct_mutex); 364 if (ret) 365 return ret; 366 367 count = 0; 368 for_each_ring(ring, dev_priv, i) { 369 if (list_empty(&ring->request_list)) 370 continue; 371 372 seq_printf(m, "%s requests:\n", ring->name); 373 list_for_each_entry(gem_request, 374 &ring->request_list, 375 list) { 376 seq_printf(m, " %d @ %d\n", 377 gem_request->seqno, 378 (int) (jiffies - gem_request->emitted_jiffies)); 379 } 380 count++; 381 } 382 mutex_unlock(&dev->struct_mutex); 383 384 if (count == 0) 385 seq_printf(m, "No requests\n"); 386 387 return 0; 388 } 389 390 static void i915_ring_seqno_info(struct seq_file *m, 391 struct intel_ring_buffer *ring) 392 { 393 if (ring->get_seqno) { 394 seq_printf(m, "Current sequence (%s): %u\n", 395 ring->name, ring->get_seqno(ring, false)); 396 } 397 } 398 399 static int i915_gem_seqno_info(struct seq_file *m, void *data) 400 { 401 struct drm_info_node *node = (struct drm_info_node *) m->private; 402 struct drm_device *dev = node->minor->dev; 403 drm_i915_private_t *dev_priv = dev->dev_private; 404 struct intel_ring_buffer *ring; 405 int ret, i; 406 407 ret = mutex_lock_interruptible(&dev->struct_mutex); 408 if (ret) 409 return ret; 410 411 for_each_ring(ring, dev_priv, i) 412 i915_ring_seqno_info(m, ring); 413 414 mutex_unlock(&dev->struct_mutex); 415 416 return 0; 417 } 418 419 420 static int i915_interrupt_info(struct seq_file *m, void *data) 421 { 422 struct drm_info_node *node = (struct drm_info_node *) m->private; 423 struct drm_device *dev = node->minor->dev; 424 drm_i915_private_t *dev_priv = dev->dev_private; 425 struct intel_ring_buffer *ring; 426 int ret, i, pipe; 427 428 ret = mutex_lock_interruptible(&dev->struct_mutex); 429 if (ret) 430 return ret; 431 432 if (IS_VALLEYVIEW(dev)) { 433 seq_printf(m, "Display IER:\t%08x\n", 434 I915_READ(VLV_IER)); 435 seq_printf(m, "Display IIR:\t%08x\n", 436 I915_READ(VLV_IIR)); 437 seq_printf(m, "Display IIR_RW:\t%08x\n", 438 I915_READ(VLV_IIR_RW)); 439 seq_printf(m, "Display IMR:\t%08x\n", 440 I915_READ(VLV_IMR)); 441 for_each_pipe(pipe) 442 seq_printf(m, "Pipe %c stat:\t%08x\n", 443 pipe_name(pipe), 444 I915_READ(PIPESTAT(pipe))); 445 446 seq_printf(m, "Master IER:\t%08x\n", 447 I915_READ(VLV_MASTER_IER)); 448 449 seq_printf(m, "Render IER:\t%08x\n", 450 I915_READ(GTIER)); 451 seq_printf(m, "Render IIR:\t%08x\n", 452 I915_READ(GTIIR)); 453 seq_printf(m, "Render IMR:\t%08x\n", 454 I915_READ(GTIMR)); 455 456 seq_printf(m, "PM IER:\t\t%08x\n", 457 I915_READ(GEN6_PMIER)); 458 seq_printf(m, "PM IIR:\t\t%08x\n", 459 I915_READ(GEN6_PMIIR)); 460 seq_printf(m, "PM IMR:\t\t%08x\n", 461 I915_READ(GEN6_PMIMR)); 462 463 seq_printf(m, "Port hotplug:\t%08x\n", 464 I915_READ(PORT_HOTPLUG_EN)); 465 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 466 I915_READ(VLV_DPFLIPSTAT)); 
467 seq_printf(m, "DPINVGTT:\t%08x\n", 468 I915_READ(DPINVGTT)); 469 470 } else if (!HAS_PCH_SPLIT(dev)) { 471 seq_printf(m, "Interrupt enable: %08x\n", 472 I915_READ(IER)); 473 seq_printf(m, "Interrupt identity: %08x\n", 474 I915_READ(IIR)); 475 seq_printf(m, "Interrupt mask: %08x\n", 476 I915_READ(IMR)); 477 for_each_pipe(pipe) 478 seq_printf(m, "Pipe %c stat: %08x\n", 479 pipe_name(pipe), 480 I915_READ(PIPESTAT(pipe))); 481 } else { 482 seq_printf(m, "North Display Interrupt enable: %08x\n", 483 I915_READ(DEIER)); 484 seq_printf(m, "North Display Interrupt identity: %08x\n", 485 I915_READ(DEIIR)); 486 seq_printf(m, "North Display Interrupt mask: %08x\n", 487 I915_READ(DEIMR)); 488 seq_printf(m, "South Display Interrupt enable: %08x\n", 489 I915_READ(SDEIER)); 490 seq_printf(m, "South Display Interrupt identity: %08x\n", 491 I915_READ(SDEIIR)); 492 seq_printf(m, "South Display Interrupt mask: %08x\n", 493 I915_READ(SDEIMR)); 494 seq_printf(m, "Graphics Interrupt enable: %08x\n", 495 I915_READ(GTIER)); 496 seq_printf(m, "Graphics Interrupt identity: %08x\n", 497 I915_READ(GTIIR)); 498 seq_printf(m, "Graphics Interrupt mask: %08x\n", 499 I915_READ(GTIMR)); 500 } 501 seq_printf(m, "Interrupts received: %d\n", 502 atomic_read(&dev_priv->irq_received)); 503 for_each_ring(ring, dev_priv, i) { 504 if (IS_GEN6(dev) || IS_GEN7(dev)) { 505 seq_printf(m, 506 "Graphics Interrupt mask (%s): %08x\n", 507 ring->name, I915_READ_IMR(ring)); 508 } 509 i915_ring_seqno_info(m, ring); 510 } 511 mutex_unlock(&dev->struct_mutex); 512 513 return 0; 514 } 515 516 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 517 { 518 struct drm_info_node *node = (struct drm_info_node *) m->private; 519 struct drm_device *dev = node->minor->dev; 520 drm_i915_private_t *dev_priv = dev->dev_private; 521 int i, ret; 522 523 ret = mutex_lock_interruptible(&dev->struct_mutex); 524 if (ret) 525 return ret; 526 527 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 528 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 529 for (i = 0; i < dev_priv->num_fence_regs; i++) { 530 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 531 532 seq_printf(m, "Fence %d, pin count = %d, object = ", 533 i, dev_priv->fence_regs[i].pin_count); 534 if (obj == NULL) 535 seq_printf(m, "unused"); 536 else 537 describe_obj(m, obj); 538 seq_printf(m, "\n"); 539 } 540 541 mutex_unlock(&dev->struct_mutex); 542 return 0; 543 } 544 545 static int i915_hws_info(struct seq_file *m, void *data) 546 { 547 struct drm_info_node *node = (struct drm_info_node *) m->private; 548 struct drm_device *dev = node->minor->dev; 549 drm_i915_private_t *dev_priv = dev->dev_private; 550 struct intel_ring_buffer *ring; 551 const u32 *hws; 552 int i; 553 554 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 555 hws = ring->status_page.page_addr; 556 if (hws == NULL) 557 return 0; 558 559 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 560 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 561 i * 4, 562 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 563 } 564 return 0; 565 } 566 567 static const char *ring_str(int ring) 568 { 569 switch (ring) { 570 case RCS: return "render"; 571 case VCS: return "bsd"; 572 case BCS: return "blt"; 573 default: return ""; 574 } 575 } 576 577 static const char *pin_flag(int pinned) 578 { 579 if (pinned > 0) 580 return " P"; 581 else if (pinned < 0) 582 return " p"; 583 else 584 return ""; 585 } 586 587 static const char *tiling_flag(int tiling) 588 { 589 switch (tiling) { 590 
default: 591 case I915_TILING_NONE: return ""; 592 case I915_TILING_X: return " X"; 593 case I915_TILING_Y: return " Y"; 594 } 595 } 596 597 static const char *dirty_flag(int dirty) 598 { 599 return dirty ? " dirty" : ""; 600 } 601 602 static const char *purgeable_flag(int purgeable) 603 { 604 return purgeable ? " purgeable" : ""; 605 } 606 607 static void print_error_buffers(struct seq_file *m, 608 const char *name, 609 struct drm_i915_error_buffer *err, 610 int count) 611 { 612 seq_printf(m, "%s [%d]:\n", name, count); 613 614 while (count--) { 615 seq_printf(m, " %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s", 616 err->gtt_offset, 617 err->size, 618 err->read_domains, 619 err->write_domain, 620 err->rseqno, err->wseqno, 621 pin_flag(err->pinned), 622 tiling_flag(err->tiling), 623 dirty_flag(err->dirty), 624 purgeable_flag(err->purgeable), 625 err->ring != -1 ? " " : "", 626 ring_str(err->ring), 627 cache_level_str(err->cache_level)); 628 629 if (err->name) 630 seq_printf(m, " (name: %d)", err->name); 631 if (err->fence_reg != I915_FENCE_REG_NONE) 632 seq_printf(m, " (fence: %d)", err->fence_reg); 633 634 seq_printf(m, "\n"); 635 err++; 636 } 637 } 638 639 static void i915_ring_error_state(struct seq_file *m, 640 struct drm_device *dev, 641 struct drm_i915_error_state *error, 642 unsigned ring) 643 { 644 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ 645 seq_printf(m, "%s command stream:\n", ring_str(ring)); 646 seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 647 seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 648 seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); 649 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 650 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 651 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 652 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 653 if (ring == RCS && INTEL_INFO(dev)->gen >= 4) 654 seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); 655 656 if (INTEL_INFO(dev)->gen >= 4) 657 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 658 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 659 seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); 660 if (INTEL_INFO(dev)->gen >= 6) { 661 seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); 662 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 663 seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", 664 error->semaphore_mboxes[ring][0], 665 error->semaphore_seqno[ring][0]); 666 seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", 667 error->semaphore_mboxes[ring][1], 668 error->semaphore_seqno[ring][1]); 669 } 670 seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 671 seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 672 seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 673 seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 674 } 675 676 struct i915_error_state_file_priv { 677 struct drm_device *dev; 678 struct drm_i915_error_state *error; 679 }; 680 681 static int i915_error_state(struct seq_file *m, void *unused) 682 { 683 struct i915_error_state_file_priv *error_priv = m->private; 684 struct drm_device *dev = error_priv->dev; 685 drm_i915_private_t *dev_priv = dev->dev_private; 686 struct drm_i915_error_state *error = error_priv->error; 687 struct intel_ring_buffer *ring; 688 int i, j, page, offset, elt; 689 690 if (!error) { 691 seq_printf(m, "no error state collected\n"); 692 return 0; 693 } 694 695 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 696 
error->time.tv_usec); 697 seq_printf(m, "Kernel: " UTS_RELEASE "\n"); 698 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 699 seq_printf(m, "EIR: 0x%08x\n", error->eir); 700 seq_printf(m, "IER: 0x%08x\n", error->ier); 701 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 702 seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); 703 seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr); 704 seq_printf(m, "CCID: 0x%08x\n", error->ccid); 705 706 for (i = 0; i < dev_priv->num_fence_regs; i++) 707 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 708 709 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) 710 seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]); 711 712 if (INTEL_INFO(dev)->gen >= 6) { 713 seq_printf(m, "ERROR: 0x%08x\n", error->error); 714 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 715 } 716 717 if (INTEL_INFO(dev)->gen == 7) 718 seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 719 720 for_each_ring(ring, dev_priv, i) 721 i915_ring_error_state(m, dev, error, i); 722 723 if (error->active_bo) 724 print_error_buffers(m, "Active", 725 error->active_bo, 726 error->active_bo_count); 727 728 if (error->pinned_bo) 729 print_error_buffers(m, "Pinned", 730 error->pinned_bo, 731 error->pinned_bo_count); 732 733 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 734 struct drm_i915_error_object *obj; 735 736 if ((obj = error->ring[i].batchbuffer)) { 737 seq_printf(m, "%s --- gtt_offset = 0x%08x\n", 738 dev_priv->ring[i].name, 739 obj->gtt_offset); 740 offset = 0; 741 for (page = 0; page < obj->page_count; page++) { 742 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 743 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); 744 offset += 4; 745 } 746 } 747 } 748 749 if (error->ring[i].num_requests) { 750 seq_printf(m, "%s --- %d requests\n", 751 dev_priv->ring[i].name, 752 error->ring[i].num_requests); 753 for (j = 0; j < error->ring[i].num_requests; j++) { 754 seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", 755 error->ring[i].requests[j].seqno, 756 error->ring[i].requests[j].jiffies, 757 error->ring[i].requests[j].tail); 758 } 759 } 760 761 if ((obj = error->ring[i].ringbuffer)) { 762 seq_printf(m, "%s --- ringbuffer = 0x%08x\n", 763 dev_priv->ring[i].name, 764 obj->gtt_offset); 765 offset = 0; 766 for (page = 0; page < obj->page_count; page++) { 767 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 768 seq_printf(m, "%08x : %08x\n", 769 offset, 770 obj->pages[page][elt]); 771 offset += 4; 772 } 773 } 774 } 775 776 obj = error->ring[i].ctx; 777 if (obj) { 778 seq_printf(m, "%s --- HW Context = 0x%08x\n", 779 dev_priv->ring[i].name, 780 obj->gtt_offset); 781 offset = 0; 782 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { 783 seq_printf(m, "[%04x] %08x %08x %08x %08x\n", 784 offset, 785 obj->pages[0][elt], 786 obj->pages[0][elt+1], 787 obj->pages[0][elt+2], 788 obj->pages[0][elt+3]); 789 offset += 16; 790 } 791 } 792 } 793 794 if (error->overlay) 795 intel_overlay_print_error_state(m, error->overlay); 796 797 if (error->display) 798 intel_display_print_error_state(m, dev, error->display); 799 800 return 0; 801 } 802 803 static ssize_t 804 i915_error_state_write(struct file *filp, 805 const char __user *ubuf, 806 size_t cnt, 807 loff_t *ppos) 808 { 809 struct seq_file *m = filp->private_data; 810 struct i915_error_state_file_priv *error_priv = m->private; 811 struct drm_device *dev = error_priv->dev; 812 int ret; 813 814 DRM_DEBUG_DRIVER("Resetting error state\n"); 815 816 ret = mutex_lock_interruptible(&dev->struct_mutex); 817 if (ret) 818 
return ret; 819 820 i915_destroy_error_state(dev); 821 mutex_unlock(&dev->struct_mutex); 822 823 return cnt; 824 } 825 826 static int i915_error_state_open(struct inode *inode, struct file *file) 827 { 828 struct drm_device *dev = inode->i_private; 829 drm_i915_private_t *dev_priv = dev->dev_private; 830 struct i915_error_state_file_priv *error_priv; 831 unsigned long flags; 832 833 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); 834 if (!error_priv) 835 return -ENOMEM; 836 837 error_priv->dev = dev; 838 839 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 840 error_priv->error = dev_priv->gpu_error.first_error; 841 if (error_priv->error) 842 kref_get(&error_priv->error->ref); 843 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 844 845 return single_open(file, i915_error_state, error_priv); 846 } 847 848 static int i915_error_state_release(struct inode *inode, struct file *file) 849 { 850 struct seq_file *m = file->private_data; 851 struct i915_error_state_file_priv *error_priv = m->private; 852 853 if (error_priv->error) 854 kref_put(&error_priv->error->ref, i915_error_state_free); 855 kfree(error_priv); 856 857 return single_release(inode, file); 858 } 859 860 static const struct file_operations i915_error_state_fops = { 861 .owner = THIS_MODULE, 862 .open = i915_error_state_open, 863 .read = seq_read, 864 .write = i915_error_state_write, 865 .llseek = default_llseek, 866 .release = i915_error_state_release, 867 }; 868 869 static int 870 i915_next_seqno_get(void *data, u64 *val) 871 { 872 struct drm_device *dev = data; 873 drm_i915_private_t *dev_priv = dev->dev_private; 874 int ret; 875 876 ret = mutex_lock_interruptible(&dev->struct_mutex); 877 if (ret) 878 return ret; 879 880 *val = dev_priv->next_seqno; 881 mutex_unlock(&dev->struct_mutex); 882 883 return 0; 884 } 885 886 static int 887 i915_next_seqno_set(void *data, u64 val) 888 { 889 struct drm_device *dev = data; 890 int ret; 891 892 ret = mutex_lock_interruptible(&dev->struct_mutex); 893 if (ret) 894 return ret; 895 896 ret = i915_gem_set_seqno(dev, val); 897 mutex_unlock(&dev->struct_mutex); 898 899 return ret; 900 } 901 902 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, 903 i915_next_seqno_get, i915_next_seqno_set, 904 "0x%llx\n"); 905 906 static int i915_rstdby_delays(struct seq_file *m, void *unused) 907 { 908 struct drm_info_node *node = (struct drm_info_node *) m->private; 909 struct drm_device *dev = node->minor->dev; 910 drm_i915_private_t *dev_priv = dev->dev_private; 911 u16 crstanddelay; 912 int ret; 913 914 ret = mutex_lock_interruptible(&dev->struct_mutex); 915 if (ret) 916 return ret; 917 918 crstanddelay = I915_READ16(CRSTANDVID); 919 920 mutex_unlock(&dev->struct_mutex); 921 922 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 923 924 return 0; 925 } 926 927 static int i915_cur_delayinfo(struct seq_file *m, void *unused) 928 { 929 struct drm_info_node *node = (struct drm_info_node *) m->private; 930 struct drm_device *dev = node->minor->dev; 931 drm_i915_private_t *dev_priv = dev->dev_private; 932 int ret; 933 934 if (IS_GEN5(dev)) { 935 u16 rgvswctl = I915_READ16(MEMSWCTL); 936 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 937 938 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 939 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 940 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 941 MEMSTAT_VID_SHIFT); 942 seq_printf(m, "Current P-state: %d\n", 943 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 944 } 
else if (IS_GEN6(dev) || IS_GEN7(dev)) { 945 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 946 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 947 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 948 u32 rpstat, cagf; 949 u32 rpupei, rpcurup, rpprevup; 950 u32 rpdownei, rpcurdown, rpprevdown; 951 int max_freq; 952 953 /* RPSTAT1 is in the GT power well */ 954 ret = mutex_lock_interruptible(&dev->struct_mutex); 955 if (ret) 956 return ret; 957 958 gen6_gt_force_wake_get(dev_priv); 959 960 rpstat = I915_READ(GEN6_RPSTAT1); 961 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 962 rpcurup = I915_READ(GEN6_RP_CUR_UP); 963 rpprevup = I915_READ(GEN6_RP_PREV_UP); 964 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 965 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 966 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 967 if (IS_HASWELL(dev)) 968 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 969 else 970 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 971 cagf *= GT_FREQUENCY_MULTIPLIER; 972 973 gen6_gt_force_wake_put(dev_priv); 974 mutex_unlock(&dev->struct_mutex); 975 976 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 977 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 978 seq_printf(m, "Render p-state ratio: %d\n", 979 (gt_perf_status & 0xff00) >> 8); 980 seq_printf(m, "Render p-state VID: %d\n", 981 gt_perf_status & 0xff); 982 seq_printf(m, "Render p-state limit: %d\n", 983 rp_state_limits & 0xff); 984 seq_printf(m, "CAGF: %dMHz\n", cagf); 985 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 986 GEN6_CURICONT_MASK); 987 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 988 GEN6_CURBSYTAVG_MASK); 989 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 990 GEN6_CURBSYTAVG_MASK); 991 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 992 GEN6_CURIAVG_MASK); 993 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 994 GEN6_CURBSYTAVG_MASK); 995 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 996 GEN6_CURBSYTAVG_MASK); 997 998 max_freq = (rp_state_cap & 0xff0000) >> 16; 999 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1000 max_freq * GT_FREQUENCY_MULTIPLIER); 1001 1002 max_freq = (rp_state_cap & 0xff00) >> 8; 1003 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1004 max_freq * GT_FREQUENCY_MULTIPLIER); 1005 1006 max_freq = rp_state_cap & 0xff; 1007 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1008 max_freq * GT_FREQUENCY_MULTIPLIER); 1009 1010 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1011 dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER); 1012 } else { 1013 seq_printf(m, "no P-state info available\n"); 1014 } 1015 1016 return 0; 1017 } 1018 1019 static int i915_delayfreq_table(struct seq_file *m, void *unused) 1020 { 1021 struct drm_info_node *node = (struct drm_info_node *) m->private; 1022 struct drm_device *dev = node->minor->dev; 1023 drm_i915_private_t *dev_priv = dev->dev_private; 1024 u32 delayfreq; 1025 int ret, i; 1026 1027 ret = mutex_lock_interruptible(&dev->struct_mutex); 1028 if (ret) 1029 return ret; 1030 1031 for (i = 0; i < 16; i++) { 1032 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 1033 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 1034 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 1035 } 1036 1037 mutex_unlock(&dev->struct_mutex); 1038 1039 return 0; 1040 } 1041 1042 static inline int MAP_TO_MV(int map) 1043 { 1044 return 1250 - (map * 25); 1045 } 1046 1047 static int i915_inttoext_table(struct seq_file *m, void *unused) 1048 { 1049 struct drm_info_node *node = (struct drm_info_node *) m->private; 1050 struct drm_device *dev = 
node->minor->dev; 1051 drm_i915_private_t *dev_priv = dev->dev_private; 1052 u32 inttoext; 1053 int ret, i; 1054 1055 ret = mutex_lock_interruptible(&dev->struct_mutex); 1056 if (ret) 1057 return ret; 1058 1059 for (i = 1; i <= 32; i++) { 1060 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 1061 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 1062 } 1063 1064 mutex_unlock(&dev->struct_mutex); 1065 1066 return 0; 1067 } 1068 1069 static int ironlake_drpc_info(struct seq_file *m) 1070 { 1071 struct drm_info_node *node = (struct drm_info_node *) m->private; 1072 struct drm_device *dev = node->minor->dev; 1073 drm_i915_private_t *dev_priv = dev->dev_private; 1074 u32 rgvmodectl, rstdbyctl; 1075 u16 crstandvid; 1076 int ret; 1077 1078 ret = mutex_lock_interruptible(&dev->struct_mutex); 1079 if (ret) 1080 return ret; 1081 1082 rgvmodectl = I915_READ(MEMMODECTL); 1083 rstdbyctl = I915_READ(RSTDBYCTL); 1084 crstandvid = I915_READ16(CRSTANDVID); 1085 1086 mutex_unlock(&dev->struct_mutex); 1087 1088 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1089 "yes" : "no"); 1090 seq_printf(m, "Boost freq: %d\n", 1091 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1092 MEMMODE_BOOST_FREQ_SHIFT); 1093 seq_printf(m, "HW control enabled: %s\n", 1094 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 1095 seq_printf(m, "SW control enabled: %s\n", 1096 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 1097 seq_printf(m, "Gated voltage change: %s\n", 1098 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 1099 seq_printf(m, "Starting frequency: P%d\n", 1100 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1101 seq_printf(m, "Max P-state: P%d\n", 1102 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1103 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1104 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1105 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1106 seq_printf(m, "Render standby enabled: %s\n", 1107 (rstdbyctl & RCX_SW_EXIT) ? 
"no" : "yes"); 1108 seq_printf(m, "Current RS state: "); 1109 switch (rstdbyctl & RSX_STATUS_MASK) { 1110 case RSX_STATUS_ON: 1111 seq_printf(m, "on\n"); 1112 break; 1113 case RSX_STATUS_RC1: 1114 seq_printf(m, "RC1\n"); 1115 break; 1116 case RSX_STATUS_RC1E: 1117 seq_printf(m, "RC1E\n"); 1118 break; 1119 case RSX_STATUS_RS1: 1120 seq_printf(m, "RS1\n"); 1121 break; 1122 case RSX_STATUS_RS2: 1123 seq_printf(m, "RS2 (RC6)\n"); 1124 break; 1125 case RSX_STATUS_RS3: 1126 seq_printf(m, "RC3 (RC6+)\n"); 1127 break; 1128 default: 1129 seq_printf(m, "unknown\n"); 1130 break; 1131 } 1132 1133 return 0; 1134 } 1135 1136 static int gen6_drpc_info(struct seq_file *m) 1137 { 1138 1139 struct drm_info_node *node = (struct drm_info_node *) m->private; 1140 struct drm_device *dev = node->minor->dev; 1141 struct drm_i915_private *dev_priv = dev->dev_private; 1142 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1143 unsigned forcewake_count; 1144 int count=0, ret; 1145 1146 1147 ret = mutex_lock_interruptible(&dev->struct_mutex); 1148 if (ret) 1149 return ret; 1150 1151 spin_lock_irq(&dev_priv->gt_lock); 1152 forcewake_count = dev_priv->forcewake_count; 1153 spin_unlock_irq(&dev_priv->gt_lock); 1154 1155 if (forcewake_count) { 1156 seq_printf(m, "RC information inaccurate because somebody " 1157 "holds a forcewake reference \n"); 1158 } else { 1159 /* NB: we cannot use forcewake, else we read the wrong values */ 1160 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1161 udelay(10); 1162 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1163 } 1164 1165 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1166 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); 1167 1168 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1169 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1170 mutex_unlock(&dev->struct_mutex); 1171 mutex_lock(&dev_priv->rps.hw_lock); 1172 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1173 mutex_unlock(&dev_priv->rps.hw_lock); 1174 1175 seq_printf(m, "Video Turbo Mode: %s\n", 1176 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1177 seq_printf(m, "HW control enabled: %s\n", 1178 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1179 seq_printf(m, "SW control enabled: %s\n", 1180 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1181 GEN6_RP_MEDIA_SW_MODE)); 1182 seq_printf(m, "RC1e Enabled: %s\n", 1183 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1184 seq_printf(m, "RC6 Enabled: %s\n", 1185 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1186 seq_printf(m, "Deep RC6 Enabled: %s\n", 1187 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1188 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1189 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1190 seq_printf(m, "Current RC state: "); 1191 switch (gt_core_status & GEN6_RCn_MASK) { 1192 case GEN6_RC0: 1193 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1194 seq_printf(m, "Core Power Down\n"); 1195 else 1196 seq_printf(m, "on\n"); 1197 break; 1198 case GEN6_RC3: 1199 seq_printf(m, "RC3\n"); 1200 break; 1201 case GEN6_RC6: 1202 seq_printf(m, "RC6\n"); 1203 break; 1204 case GEN6_RC7: 1205 seq_printf(m, "RC7\n"); 1206 break; 1207 default: 1208 seq_printf(m, "Unknown\n"); 1209 break; 1210 } 1211 1212 seq_printf(m, "Core Power Down: %s\n", 1213 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1214 1215 /* Not exactly sure what this is */ 1216 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1217 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1218 seq_printf(m, "RC6 residency since boot: %u\n", 1219 I915_READ(GEN6_GT_GFX_RC6)); 1220 
seq_printf(m, "RC6+ residency since boot: %u\n", 1221 I915_READ(GEN6_GT_GFX_RC6p)); 1222 seq_printf(m, "RC6++ residency since boot: %u\n", 1223 I915_READ(GEN6_GT_GFX_RC6pp)); 1224 1225 seq_printf(m, "RC6 voltage: %dmV\n", 1226 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1227 seq_printf(m, "RC6+ voltage: %dmV\n", 1228 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1229 seq_printf(m, "RC6++ voltage: %dmV\n", 1230 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1231 return 0; 1232 } 1233 1234 static int i915_drpc_info(struct seq_file *m, void *unused) 1235 { 1236 struct drm_info_node *node = (struct drm_info_node *) m->private; 1237 struct drm_device *dev = node->minor->dev; 1238 1239 if (IS_GEN6(dev) || IS_GEN7(dev)) 1240 return gen6_drpc_info(m); 1241 else 1242 return ironlake_drpc_info(m); 1243 } 1244 1245 static int i915_fbc_status(struct seq_file *m, void *unused) 1246 { 1247 struct drm_info_node *node = (struct drm_info_node *) m->private; 1248 struct drm_device *dev = node->minor->dev; 1249 drm_i915_private_t *dev_priv = dev->dev_private; 1250 1251 if (!I915_HAS_FBC(dev)) { 1252 seq_printf(m, "FBC unsupported on this chipset\n"); 1253 return 0; 1254 } 1255 1256 if (intel_fbc_enabled(dev)) { 1257 seq_printf(m, "FBC enabled\n"); 1258 } else { 1259 seq_printf(m, "FBC disabled: "); 1260 switch (dev_priv->no_fbc_reason) { 1261 case FBC_NO_OUTPUT: 1262 seq_printf(m, "no outputs"); 1263 break; 1264 case FBC_STOLEN_TOO_SMALL: 1265 seq_printf(m, "not enough stolen memory"); 1266 break; 1267 case FBC_UNSUPPORTED_MODE: 1268 seq_printf(m, "mode not supported"); 1269 break; 1270 case FBC_MODE_TOO_LARGE: 1271 seq_printf(m, "mode too large"); 1272 break; 1273 case FBC_BAD_PLANE: 1274 seq_printf(m, "FBC unsupported on plane"); 1275 break; 1276 case FBC_NOT_TILED: 1277 seq_printf(m, "scanout buffer not tiled"); 1278 break; 1279 case FBC_MULTIPLE_PIPES: 1280 seq_printf(m, "multiple pipes are enabled"); 1281 break; 1282 case FBC_MODULE_PARAM: 1283 seq_printf(m, "disabled per module param (default off)"); 1284 break; 1285 default: 1286 seq_printf(m, "unknown reason"); 1287 } 1288 seq_printf(m, "\n"); 1289 } 1290 return 0; 1291 } 1292 1293 static int i915_sr_status(struct seq_file *m, void *unused) 1294 { 1295 struct drm_info_node *node = (struct drm_info_node *) m->private; 1296 struct drm_device *dev = node->minor->dev; 1297 drm_i915_private_t *dev_priv = dev->dev_private; 1298 bool sr_enabled = false; 1299 1300 if (HAS_PCH_SPLIT(dev)) 1301 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1302 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1303 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1304 else if (IS_I915GM(dev)) 1305 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1306 else if (IS_PINEVIEW(dev)) 1307 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1308 1309 seq_printf(m, "self-refresh: %s\n", 1310 sr_enabled ? 
"enabled" : "disabled"); 1311 1312 return 0; 1313 } 1314 1315 static int i915_emon_status(struct seq_file *m, void *unused) 1316 { 1317 struct drm_info_node *node = (struct drm_info_node *) m->private; 1318 struct drm_device *dev = node->minor->dev; 1319 drm_i915_private_t *dev_priv = dev->dev_private; 1320 unsigned long temp, chipset, gfx; 1321 int ret; 1322 1323 if (!IS_GEN5(dev)) 1324 return -ENODEV; 1325 1326 ret = mutex_lock_interruptible(&dev->struct_mutex); 1327 if (ret) 1328 return ret; 1329 1330 temp = i915_mch_val(dev_priv); 1331 chipset = i915_chipset_val(dev_priv); 1332 gfx = i915_gfx_val(dev_priv); 1333 mutex_unlock(&dev->struct_mutex); 1334 1335 seq_printf(m, "GMCH temp: %ld\n", temp); 1336 seq_printf(m, "Chipset power: %ld\n", chipset); 1337 seq_printf(m, "GFX power: %ld\n", gfx); 1338 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1339 1340 return 0; 1341 } 1342 1343 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1344 { 1345 struct drm_info_node *node = (struct drm_info_node *) m->private; 1346 struct drm_device *dev = node->minor->dev; 1347 drm_i915_private_t *dev_priv = dev->dev_private; 1348 int ret; 1349 int gpu_freq, ia_freq; 1350 1351 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1352 seq_printf(m, "unsupported on this chipset\n"); 1353 return 0; 1354 } 1355 1356 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1357 if (ret) 1358 return ret; 1359 1360 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1361 1362 for (gpu_freq = dev_priv->rps.min_delay; 1363 gpu_freq <= dev_priv->rps.max_delay; 1364 gpu_freq++) { 1365 ia_freq = gpu_freq; 1366 sandybridge_pcode_read(dev_priv, 1367 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1368 &ia_freq); 1369 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1370 gpu_freq * GT_FREQUENCY_MULTIPLIER, 1371 ((ia_freq >> 0) & 0xff) * 100, 1372 ((ia_freq >> 8) & 0xff) * 100); 1373 } 1374 1375 mutex_unlock(&dev_priv->rps.hw_lock); 1376 1377 return 0; 1378 } 1379 1380 static int i915_gfxec(struct seq_file *m, void *unused) 1381 { 1382 struct drm_info_node *node = (struct drm_info_node *) m->private; 1383 struct drm_device *dev = node->minor->dev; 1384 drm_i915_private_t *dev_priv = dev->dev_private; 1385 int ret; 1386 1387 ret = mutex_lock_interruptible(&dev->struct_mutex); 1388 if (ret) 1389 return ret; 1390 1391 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1392 1393 mutex_unlock(&dev->struct_mutex); 1394 1395 return 0; 1396 } 1397 1398 static int i915_opregion(struct seq_file *m, void *unused) 1399 { 1400 struct drm_info_node *node = (struct drm_info_node *) m->private; 1401 struct drm_device *dev = node->minor->dev; 1402 drm_i915_private_t *dev_priv = dev->dev_private; 1403 struct intel_opregion *opregion = &dev_priv->opregion; 1404 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); 1405 int ret; 1406 1407 if (data == NULL) 1408 return -ENOMEM; 1409 1410 ret = mutex_lock_interruptible(&dev->struct_mutex); 1411 if (ret) 1412 goto out; 1413 1414 if (opregion->header) { 1415 memcpy_fromio(data, opregion->header, OPREGION_SIZE); 1416 seq_write(m, data, OPREGION_SIZE); 1417 } 1418 1419 mutex_unlock(&dev->struct_mutex); 1420 1421 out: 1422 kfree(data); 1423 return 0; 1424 } 1425 1426 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1427 { 1428 struct drm_info_node *node = (struct drm_info_node *) m->private; 1429 struct drm_device *dev = node->minor->dev; 1430 drm_i915_private_t *dev_priv = dev->dev_private; 1431 struct intel_fbdev *ifbdev; 1432 struct intel_framebuffer 
*fb; 1433 int ret; 1434 1435 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1436 if (ret) 1437 return ret; 1438 1439 ifbdev = dev_priv->fbdev; 1440 fb = to_intel_framebuffer(ifbdev->helper.fb); 1441 1442 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1443 fb->base.width, 1444 fb->base.height, 1445 fb->base.depth, 1446 fb->base.bits_per_pixel, 1447 atomic_read(&fb->base.refcount.refcount)); 1448 describe_obj(m, fb->obj); 1449 seq_printf(m, "\n"); 1450 mutex_unlock(&dev->mode_config.mutex); 1451 1452 mutex_lock(&dev->mode_config.fb_lock); 1453 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1454 if (&fb->base == ifbdev->helper.fb) 1455 continue; 1456 1457 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1458 fb->base.width, 1459 fb->base.height, 1460 fb->base.depth, 1461 fb->base.bits_per_pixel, 1462 atomic_read(&fb->base.refcount.refcount)); 1463 describe_obj(m, fb->obj); 1464 seq_printf(m, "\n"); 1465 } 1466 mutex_unlock(&dev->mode_config.fb_lock); 1467 1468 return 0; 1469 } 1470 1471 static int i915_context_status(struct seq_file *m, void *unused) 1472 { 1473 struct drm_info_node *node = (struct drm_info_node *) m->private; 1474 struct drm_device *dev = node->minor->dev; 1475 drm_i915_private_t *dev_priv = dev->dev_private; 1476 struct intel_ring_buffer *ring; 1477 int ret, i; 1478 1479 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1480 if (ret) 1481 return ret; 1482 1483 if (dev_priv->ips.pwrctx) { 1484 seq_printf(m, "power context "); 1485 describe_obj(m, dev_priv->ips.pwrctx); 1486 seq_printf(m, "\n"); 1487 } 1488 1489 if (dev_priv->ips.renderctx) { 1490 seq_printf(m, "render context "); 1491 describe_obj(m, dev_priv->ips.renderctx); 1492 seq_printf(m, "\n"); 1493 } 1494 1495 for_each_ring(ring, dev_priv, i) { 1496 if (ring->default_context) { 1497 seq_printf(m, "HW default context %s ring ", ring->name); 1498 describe_obj(m, ring->default_context->obj); 1499 seq_printf(m, "\n"); 1500 } 1501 } 1502 1503 mutex_unlock(&dev->mode_config.mutex); 1504 1505 return 0; 1506 } 1507 1508 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) 1509 { 1510 struct drm_info_node *node = (struct drm_info_node *) m->private; 1511 struct drm_device *dev = node->minor->dev; 1512 struct drm_i915_private *dev_priv = dev->dev_private; 1513 unsigned forcewake_count; 1514 1515 spin_lock_irq(&dev_priv->gt_lock); 1516 forcewake_count = dev_priv->forcewake_count; 1517 spin_unlock_irq(&dev_priv->gt_lock); 1518 1519 seq_printf(m, "forcewake count = %u\n", forcewake_count); 1520 1521 return 0; 1522 } 1523 1524 static const char *swizzle_string(unsigned swizzle) 1525 { 1526 switch(swizzle) { 1527 case I915_BIT_6_SWIZZLE_NONE: 1528 return "none"; 1529 case I915_BIT_6_SWIZZLE_9: 1530 return "bit9"; 1531 case I915_BIT_6_SWIZZLE_9_10: 1532 return "bit9/bit10"; 1533 case I915_BIT_6_SWIZZLE_9_11: 1534 return "bit9/bit11"; 1535 case I915_BIT_6_SWIZZLE_9_10_11: 1536 return "bit9/bit10/bit11"; 1537 case I915_BIT_6_SWIZZLE_9_17: 1538 return "bit9/bit17"; 1539 case I915_BIT_6_SWIZZLE_9_10_17: 1540 return "bit9/bit10/bit17"; 1541 case I915_BIT_6_SWIZZLE_UNKNOWN: 1542 return "unknown"; 1543 } 1544 1545 return "bug"; 1546 } 1547 1548 static int i915_swizzle_info(struct seq_file *m, void *data) 1549 { 1550 struct drm_info_node *node = (struct drm_info_node *) m->private; 1551 struct drm_device *dev = node->minor->dev; 1552 struct drm_i915_private *dev_priv = dev->dev_private; 1553 int ret; 1554 1555 ret = 
mutex_lock_interruptible(&dev->struct_mutex); 1556 if (ret) 1557 return ret; 1558 1559 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1560 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1561 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1562 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1563 1564 if (IS_GEN3(dev) || IS_GEN4(dev)) { 1565 seq_printf(m, "DDC = 0x%08x\n", 1566 I915_READ(DCC)); 1567 seq_printf(m, "C0DRB3 = 0x%04x\n", 1568 I915_READ16(C0DRB3)); 1569 seq_printf(m, "C1DRB3 = 0x%04x\n", 1570 I915_READ16(C1DRB3)); 1571 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 1572 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1573 I915_READ(MAD_DIMM_C0)); 1574 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1575 I915_READ(MAD_DIMM_C1)); 1576 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1577 I915_READ(MAD_DIMM_C2)); 1578 seq_printf(m, "TILECTL = 0x%08x\n", 1579 I915_READ(TILECTL)); 1580 seq_printf(m, "ARB_MODE = 0x%08x\n", 1581 I915_READ(ARB_MODE)); 1582 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1583 I915_READ(DISP_ARB_CTL)); 1584 } 1585 mutex_unlock(&dev->struct_mutex); 1586 1587 return 0; 1588 } 1589 1590 static int i915_ppgtt_info(struct seq_file *m, void *data) 1591 { 1592 struct drm_info_node *node = (struct drm_info_node *) m->private; 1593 struct drm_device *dev = node->minor->dev; 1594 struct drm_i915_private *dev_priv = dev->dev_private; 1595 struct intel_ring_buffer *ring; 1596 int i, ret; 1597 1598 1599 ret = mutex_lock_interruptible(&dev->struct_mutex); 1600 if (ret) 1601 return ret; 1602 if (INTEL_INFO(dev)->gen == 6) 1603 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 1604 1605 for_each_ring(ring, dev_priv, i) { 1606 seq_printf(m, "%s\n", ring->name); 1607 if (INTEL_INFO(dev)->gen == 7) 1608 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 1609 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 1610 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 1611 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 1612 } 1613 if (dev_priv->mm.aliasing_ppgtt) { 1614 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1615 1616 seq_printf(m, "aliasing PPGTT:\n"); 1617 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1618 } 1619 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1620 mutex_unlock(&dev->struct_mutex); 1621 1622 return 0; 1623 } 1624 1625 static int i915_dpio_info(struct seq_file *m, void *data) 1626 { 1627 struct drm_info_node *node = (struct drm_info_node *) m->private; 1628 struct drm_device *dev = node->minor->dev; 1629 struct drm_i915_private *dev_priv = dev->dev_private; 1630 int ret; 1631 1632 1633 if (!IS_VALLEYVIEW(dev)) { 1634 seq_printf(m, "unsupported\n"); 1635 return 0; 1636 } 1637 1638 ret = mutex_lock_interruptible(&dev_priv->dpio_lock); 1639 if (ret) 1640 return ret; 1641 1642 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); 1643 1644 seq_printf(m, "DPIO_DIV_A: 0x%08x\n", 1645 intel_dpio_read(dev_priv, _DPIO_DIV_A)); 1646 seq_printf(m, "DPIO_DIV_B: 0x%08x\n", 1647 intel_dpio_read(dev_priv, _DPIO_DIV_B)); 1648 1649 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", 1650 intel_dpio_read(dev_priv, _DPIO_REFSFR_A)); 1651 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", 1652 intel_dpio_read(dev_priv, _DPIO_REFSFR_B)); 1653 1654 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", 1655 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); 1656 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1657 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); 1658 1659 seq_printf(m, 
"DPIO_LFP_COEFF_A: 0x%08x\n", 1660 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A)); 1661 seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n", 1662 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B)); 1663 1664 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1665 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1666 1667 mutex_unlock(&dev_priv->dpio_lock); 1668 1669 return 0; 1670 } 1671 1672 static int 1673 i915_wedged_get(void *data, u64 *val) 1674 { 1675 struct drm_device *dev = data; 1676 drm_i915_private_t *dev_priv = dev->dev_private; 1677 1678 *val = atomic_read(&dev_priv->gpu_error.reset_counter); 1679 1680 return 0; 1681 } 1682 1683 static int 1684 i915_wedged_set(void *data, u64 val) 1685 { 1686 struct drm_device *dev = data; 1687 1688 DRM_INFO("Manually setting wedged to %llu\n", val); 1689 i915_handle_error(dev, val); 1690 1691 return 0; 1692 } 1693 1694 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 1695 i915_wedged_get, i915_wedged_set, 1696 "%llu\n"); 1697 1698 static int 1699 i915_ring_stop_get(void *data, u64 *val) 1700 { 1701 struct drm_device *dev = data; 1702 drm_i915_private_t *dev_priv = dev->dev_private; 1703 1704 *val = dev_priv->gpu_error.stop_rings; 1705 1706 return 0; 1707 } 1708 1709 static int 1710 i915_ring_stop_set(void *data, u64 val) 1711 { 1712 struct drm_device *dev = data; 1713 struct drm_i915_private *dev_priv = dev->dev_private; 1714 int ret; 1715 1716 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 1717 1718 ret = mutex_lock_interruptible(&dev->struct_mutex); 1719 if (ret) 1720 return ret; 1721 1722 dev_priv->gpu_error.stop_rings = val; 1723 mutex_unlock(&dev->struct_mutex); 1724 1725 return 0; 1726 } 1727 1728 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 1729 i915_ring_stop_get, i915_ring_stop_set, 1730 "0x%08llx\n"); 1731 1732 #define DROP_UNBOUND 0x1 1733 #define DROP_BOUND 0x2 1734 #define DROP_RETIRE 0x4 1735 #define DROP_ACTIVE 0x8 1736 #define DROP_ALL (DROP_UNBOUND | \ 1737 DROP_BOUND | \ 1738 DROP_RETIRE | \ 1739 DROP_ACTIVE) 1740 static int 1741 i915_drop_caches_get(void *data, u64 *val) 1742 { 1743 *val = DROP_ALL; 1744 1745 return 0; 1746 } 1747 1748 static int 1749 i915_drop_caches_set(void *data, u64 val) 1750 { 1751 struct drm_device *dev = data; 1752 struct drm_i915_private *dev_priv = dev->dev_private; 1753 struct drm_i915_gem_object *obj, *next; 1754 int ret; 1755 1756 DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); 1757 1758 /* No need to check and wait for gpu resets, only libdrm auto-restarts 1759 * on ioctls on -EAGAIN. 
*/ 1760 ret = mutex_lock_interruptible(&dev->struct_mutex); 1761 if (ret) 1762 return ret; 1763 1764 if (val & DROP_ACTIVE) { 1765 ret = i915_gpu_idle(dev); 1766 if (ret) 1767 goto unlock; 1768 } 1769 1770 if (val & (DROP_RETIRE | DROP_ACTIVE)) 1771 i915_gem_retire_requests(dev); 1772 1773 if (val & DROP_BOUND) { 1774 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) 1775 if (obj->pin_count == 0) { 1776 ret = i915_gem_object_unbind(obj); 1777 if (ret) 1778 goto unlock; 1779 } 1780 } 1781 1782 if (val & DROP_UNBOUND) { 1783 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) 1784 if (obj->pages_pin_count == 0) { 1785 ret = i915_gem_object_put_pages(obj); 1786 if (ret) 1787 goto unlock; 1788 } 1789 } 1790 1791 unlock: 1792 mutex_unlock(&dev->struct_mutex); 1793 1794 return ret; 1795 } 1796 1797 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 1798 i915_drop_caches_get, i915_drop_caches_set, 1799 "0x%08llx\n"); 1800 1801 static int 1802 i915_max_freq_get(void *data, u64 *val) 1803 { 1804 struct drm_device *dev = data; 1805 drm_i915_private_t *dev_priv = dev->dev_private; 1806 int ret; 1807 1808 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1809 return -ENODEV; 1810 1811 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1812 if (ret) 1813 return ret; 1814 1815 *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; 1816 mutex_unlock(&dev_priv->rps.hw_lock); 1817 1818 return 0; 1819 } 1820 1821 static int 1822 i915_max_freq_set(void *data, u64 val) 1823 { 1824 struct drm_device *dev = data; 1825 struct drm_i915_private *dev_priv = dev->dev_private; 1826 int ret; 1827 1828 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1829 return -ENODEV; 1830 1831 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 1832 1833 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1834 if (ret) 1835 return ret; 1836 1837 /* 1838 * Turbo will still be enabled, but won't go above the set value. 1839 */ 1840 do_div(val, GT_FREQUENCY_MULTIPLIER); 1841 dev_priv->rps.max_delay = val; 1842 gen6_set_rps(dev, val); 1843 mutex_unlock(&dev_priv->rps.hw_lock); 1844 1845 return 0; 1846 } 1847 1848 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 1849 i915_max_freq_get, i915_max_freq_set, 1850 "%llu\n"); 1851 1852 static int 1853 i915_min_freq_get(void *data, u64 *val) 1854 { 1855 struct drm_device *dev = data; 1856 drm_i915_private_t *dev_priv = dev->dev_private; 1857 int ret; 1858 1859 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1860 return -ENODEV; 1861 1862 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1863 if (ret) 1864 return ret; 1865 1866 *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; 1867 mutex_unlock(&dev_priv->rps.hw_lock); 1868 1869 return 0; 1870 } 1871 1872 static int 1873 i915_min_freq_set(void *data, u64 val) 1874 { 1875 struct drm_device *dev = data; 1876 struct drm_i915_private *dev_priv = dev->dev_private; 1877 int ret; 1878 1879 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1880 return -ENODEV; 1881 1882 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 1883 1884 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1885 if (ret) 1886 return ret; 1887 1888 /* 1889 * Turbo will still be enabled, but won't go below the set value. 
1890 */ 1891 do_div(val, GT_FREQUENCY_MULTIPLIER); 1892 dev_priv->rps.min_delay = val; 1893 gen6_set_rps(dev, val); 1894 mutex_unlock(&dev_priv->rps.hw_lock); 1895 1896 return 0; 1897 } 1898 1899 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 1900 i915_min_freq_get, i915_min_freq_set, 1901 "%llu\n"); 1902 1903 static int 1904 i915_cache_sharing_get(void *data, u64 *val) 1905 { 1906 struct drm_device *dev = data; 1907 drm_i915_private_t *dev_priv = dev->dev_private; 1908 u32 snpcr; 1909 int ret; 1910 1911 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1912 return -ENODEV; 1913 1914 ret = mutex_lock_interruptible(&dev->struct_mutex); 1915 if (ret) 1916 return ret; 1917 1918 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1919 mutex_unlock(&dev_priv->dev->struct_mutex); 1920 1921 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 1922 1923 return 0; 1924 } 1925 1926 static int 1927 i915_cache_sharing_set(void *data, u64 val) 1928 { 1929 struct drm_device *dev = data; 1930 struct drm_i915_private *dev_priv = dev->dev_private; 1931 u32 snpcr; 1932 1933 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1934 return -ENODEV; 1935 1936 if (val > 3) 1937 return -EINVAL; 1938 1939 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 1940 1941 /* Update the cache sharing policy here as well */ 1942 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1943 snpcr &= ~GEN6_MBC_SNPCR_MASK; 1944 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 1945 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1946 1947 return 0; 1948 } 1949 1950 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 1951 i915_cache_sharing_get, i915_cache_sharing_set, 1952 "%llu\n"); 1953 1954 /* As the drm_debugfs_init() routines are called before dev->dev_private is 1955 * allocated we need to hook into the minor for release. */ 1956 static int 1957 drm_add_fake_info_node(struct drm_minor *minor, 1958 struct dentry *ent, 1959 const void *key) 1960 { 1961 struct drm_info_node *node; 1962 1963 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 1964 if (node == NULL) { 1965 debugfs_remove(ent); 1966 return -ENOMEM; 1967 } 1968 1969 node->minor = minor; 1970 node->dent = ent; 1971 node->info_ent = (void *) key; 1972 1973 mutex_lock(&minor->debugfs_lock); 1974 list_add(&node->list, &minor->debugfs_list); 1975 mutex_unlock(&minor->debugfs_lock); 1976 1977 return 0; 1978 } 1979 1980 static int i915_forcewake_open(struct inode *inode, struct file *file) 1981 { 1982 struct drm_device *dev = inode->i_private; 1983 struct drm_i915_private *dev_priv = dev->dev_private; 1984 1985 if (INTEL_INFO(dev)->gen < 6) 1986 return 0; 1987 1988 gen6_gt_force_wake_get(dev_priv); 1989 1990 return 0; 1991 } 1992 1993 static int i915_forcewake_release(struct inode *inode, struct file *file) 1994 { 1995 struct drm_device *dev = inode->i_private; 1996 struct drm_i915_private *dev_priv = dev->dev_private; 1997 1998 if (INTEL_INFO(dev)->gen < 6) 1999 return 0; 2000 2001 gen6_gt_force_wake_put(dev_priv); 2002 2003 return 0; 2004 } 2005 2006 static const struct file_operations i915_forcewake_fops = { 2007 .owner = THIS_MODULE, 2008 .open = i915_forcewake_open, 2009 .release = i915_forcewake_release, 2010 }; 2011 2012 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 2013 { 2014 struct drm_device *dev = minor->dev; 2015 struct dentry *ent; 2016 2017 ent = debugfs_create_file("i915_forcewake_user", 2018 S_IRUSR, 2019 root, dev, 2020 &i915_forcewake_fops); 2021 if (IS_ERR(ent)) 2022 return PTR_ERR(ent); 2023 2024 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 2025 
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Forcewake only exists on gen6+; treat earlier gens as a no-op. */
	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
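/*
 * A forcewake reference is taken in ->open and dropped in ->release, so the
 * GT is kept awake for as long as i915_forcewake_user is held open. An
 * assumed user-space pattern for safe register inspection (the path depends
 * on the debugfs mount and drm minor):
 *
 *   exec 3</sys/kernel/debug/dri/0/i915_forcewake_user   # reference held
 *   ... sample GT registers ...
 *   exec 3<&-                                            # reference dropped
 */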
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, fops);
}

static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_wedged",
				  &i915_wedged_fops);
	if (ret)
		return ret;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_max_freq",
				  &i915_max_freq_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_min_freq",
				  &i915_min_freq_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_cache_sharing",
				  &i915_cache_sharing_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_ring_stop",
				  &i915_ring_stop_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_gem_drop_caches",
				  &i915_drop_caches_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_error_state",
				  &i915_error_state_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_next_seqno",
				  &i915_next_seqno_fops);
	if (ret)
		return ret;

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	/* The fake info nodes are keyed by their fops pointer, so each of
	 * the special files is removed as a one-entry "list". */
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
				 1, minor);
}

#endif /* CONFIG_DEBUG_FS */