1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/debugfs.h> 31 #include <linux/slab.h> 32 #include <linux/export.h> 33 #include <generated/utsrelease.h> 34 #include <drm/drmP.h> 35 #include "intel_drv.h" 36 #include "intel_ringbuffer.h" 37 #include <drm/i915_drm.h> 38 #include "i915_drv.h" 39 40 #define DRM_I915_RING_DEBUG 1 41 42 43 #if defined(CONFIG_DEBUG_FS) 44 45 enum { 46 ACTIVE_LIST, 47 INACTIVE_LIST, 48 PINNED_LIST, 49 }; 50 51 static const char *yesno(int v) 52 { 53 return v ? "yes" : "no"; 54 } 55 56 static int i915_capabilities(struct seq_file *m, void *data) 57 { 58 struct drm_info_node *node = (struct drm_info_node *) m->private; 59 struct drm_device *dev = node->minor->dev; 60 const struct intel_device_info *info = INTEL_INFO(dev); 61 62 seq_printf(m, "gen: %d\n", info->gen); 63 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 64 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 65 #define SEP_SEMICOLON ; 66 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON); 67 #undef PRINT_FLAG 68 #undef SEP_SEMICOLON 69 70 return 0; 71 } 72 73 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 74 { 75 if (obj->user_pin_count > 0) 76 return "P"; 77 else if (obj->pin_count > 0) 78 return "p"; 79 else 80 return " "; 81 } 82 83 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 84 { 85 switch (obj->tiling_mode) { 86 default: 87 case I915_TILING_NONE: return " "; 88 case I915_TILING_X: return "X"; 89 case I915_TILING_Y: return "Y"; 90 } 91 } 92 93 static const char *cache_level_str(int type) 94 { 95 switch (type) { 96 case I915_CACHE_NONE: return " uncached"; 97 case I915_CACHE_LLC: return " snooped (LLC)"; 98 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 99 default: return ""; 100 } 101 } 102 103 static void 104 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 105 { 106 seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", 107 &obj->base, 108 get_pin_flag(obj), 109 get_tiling_flag(obj), 110 obj->base.size / 1024, 111 obj->base.read_domains, 112 obj->base.write_domain, 113 obj->last_read_seqno, 114 obj->last_write_seqno, 115 obj->last_fenced_seqno, 116 cache_level_str(obj->cache_level), 117 obj->dirty ? " dirty" : "", 118 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 119 if (obj->base.name) 120 seq_printf(m, " (name: %d)", obj->base.name); 121 if (obj->pin_count) 122 seq_printf(m, " (pinned x %d)", obj->pin_count); 123 if (obj->fence_reg != I915_FENCE_REG_NONE) 124 seq_printf(m, " (fence: %d)", obj->fence_reg); 125 if (obj->gtt_space != NULL) 126 seq_printf(m, " (gtt offset: %08x, size: %08x)", 127 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 128 if (obj->stolen) 129 seq_printf(m, " (stolen: %08lx)", obj->stolen->start); 130 if (obj->pin_mappable || obj->fault_mappable) { 131 char s[3], *t = s; 132 if (obj->pin_mappable) 133 *t++ = 'p'; 134 if (obj->fault_mappable) 135 *t++ = 'f'; 136 *t = '\0'; 137 seq_printf(m, " (%s mappable)", s); 138 } 139 if (obj->ring != NULL) 140 seq_printf(m, " (%s)", obj->ring->name); 141 } 142 143 static int i915_gem_object_list_info(struct seq_file *m, void *data) 144 { 145 struct drm_info_node *node = (struct drm_info_node *) m->private; 146 uintptr_t list = (uintptr_t) node->info_ent->data; 147 struct list_head *head; 148 struct drm_device *dev = node->minor->dev; 149 drm_i915_private_t *dev_priv = dev->dev_private; 150 struct drm_i915_gem_object *obj; 151 size_t total_obj_size, total_gtt_size; 152 int count, ret; 153 154 ret = mutex_lock_interruptible(&dev->struct_mutex); 155 if (ret) 156 return ret; 157 158 switch (list) { 159 case ACTIVE_LIST: 160 seq_printf(m, "Active:\n"); 161 head = &dev_priv->mm.active_list; 162 break; 163 case INACTIVE_LIST: 164 seq_printf(m, "Inactive:\n"); 165 head = &dev_priv->mm.inactive_list; 166 break; 167 default: 168 mutex_unlock(&dev->struct_mutex); 169 return -EINVAL; 170 } 171 172 total_obj_size = total_gtt_size = count = 0; 173 list_for_each_entry(obj, head, mm_list) { 174 seq_printf(m, " "); 175 describe_obj(m, obj); 176 seq_printf(m, "\n"); 177 total_obj_size += obj->base.size; 178 total_gtt_size += obj->gtt_space->size; 179 count++; 180 } 181 mutex_unlock(&dev->struct_mutex); 182 183 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 184 count, total_obj_size, total_gtt_size); 185 return 0; 186 } 187 188 #define count_objects(list, member) do { \ 189 list_for_each_entry(obj, list, member) { \ 190 size += obj->gtt_space->size; \ 191 ++count; \ 192 if (obj->map_and_fenceable) { \ 193 mappable_size += obj->gtt_space->size; \ 194 ++mappable_count; \ 195 } \ 196 } \ 197 } while (0) 198 199 struct file_stats { 200 int count; 201 size_t total, active, inactive, unbound; 202 }; 203 204 static int per_file_stats(int id, void *ptr, void *data) 205 { 206 struct drm_i915_gem_object *obj = ptr; 207 struct file_stats *stats = data; 208 209 stats->count++; 210 stats->total += obj->base.size; 211 212 if (obj->gtt_space) { 213 if (!list_empty(&obj->ring_list)) 214 stats->active += obj->base.size; 215 else 216 stats->inactive += obj->base.size; 217 } else { 218 if (!list_empty(&obj->global_list)) 219 stats->unbound += obj->base.size; 220 } 221 222 return 0; 223 } 224 225 static int i915_gem_object_info(struct seq_file *m, void* data) 226 { 227 struct drm_info_node *node = (struct drm_info_node *) m->private; 228 struct drm_device *dev = node->minor->dev; 229 struct drm_i915_private *dev_priv = dev->dev_private; 230 u32 count, mappable_count, purgeable_count; 231 size_t size, mappable_size, purgeable_size; 232 struct drm_i915_gem_object *obj; 233 struct drm_file *file; 234 int ret; 235 236 ret = mutex_lock_interruptible(&dev->struct_mutex); 237 if (ret) 238 return ret; 239 240 seq_printf(m, "%u objects, %zu bytes\n", 241 dev_priv->mm.object_count, 242 dev_priv->mm.object_memory); 243 244 size = count = mappable_size = mappable_count = 0; 245 count_objects(&dev_priv->mm.bound_list, global_list); 246 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 247 count, mappable_count, size, mappable_size); 248 249 size = count = mappable_size = mappable_count = 0; 250 count_objects(&dev_priv->mm.active_list, mm_list); 251 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 252 count, mappable_count, size, mappable_size); 253 254 size = count = mappable_size = mappable_count = 0; 255 count_objects(&dev_priv->mm.inactive_list, mm_list); 256 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 257 count, mappable_count, size, mappable_size); 258 259 size = count = purgeable_size = purgeable_count = 0; 260 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 261 size += obj->base.size, ++count; 262 if (obj->madv == I915_MADV_DONTNEED) 263 purgeable_size += obj->base.size, ++purgeable_count; 264 } 265 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); 266 267 size = count = mappable_size = mappable_count = 0; 268 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 269 if (obj->fault_mappable) { 270 size += obj->gtt_space->size; 271 ++count; 272 } 273 if (obj->pin_mappable) { 274 mappable_size += obj->gtt_space->size; 275 ++mappable_count; 276 } 277 if (obj->madv == I915_MADV_DONTNEED) { 278 purgeable_size += obj->base.size; 279 ++purgeable_count; 280 } 281 } 282 seq_printf(m, "%u purgeable objects, %zu bytes\n", 283 purgeable_count, purgeable_size); 284 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 285 mappable_count, mappable_size); 286 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 287 count, size); 288 289 seq_printf(m, "%zu [%lu] gtt total\n", 290 dev_priv->gtt.total, 291 dev_priv->gtt.mappable_end - dev_priv->gtt.start); 292 293 seq_printf(m, "\n"); 294 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 295 struct file_stats stats; 296 297 memset(&stats, 0, sizeof(stats)); 298 idr_for_each(&file->object_idr, per_file_stats, &stats); 299 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n", 300 get_pid_task(file->pid, PIDTYPE_PID)->comm, 301 stats.count, 302 stats.total, 303 stats.active, 304 stats.inactive, 305 stats.unbound); 306 } 307 308 mutex_unlock(&dev->struct_mutex); 309 310 return 0; 311 } 312 313 static int i915_gem_gtt_info(struct seq_file *m, void* data) 314 { 315 struct drm_info_node *node = (struct drm_info_node *) m->private; 316 struct drm_device *dev = node->minor->dev; 317 uintptr_t list = (uintptr_t) node->info_ent->data; 318 struct drm_i915_private *dev_priv = dev->dev_private; 319 struct drm_i915_gem_object *obj; 320 size_t total_obj_size, total_gtt_size; 321 int count, ret; 322 323 ret = mutex_lock_interruptible(&dev->struct_mutex); 324 if (ret) 325 return ret; 326 327 total_obj_size = total_gtt_size = count = 0; 328 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 329 if (list == PINNED_LIST && obj->pin_count == 0) 330 continue; 331 332 seq_printf(m, " "); 333 describe_obj(m, obj); 334 seq_printf(m, "\n"); 335 total_obj_size += obj->base.size; 336 total_gtt_size += obj->gtt_space->size; 337 count++; 338 } 339 340 mutex_unlock(&dev->struct_mutex); 341 342 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 343 count, total_obj_size, total_gtt_size); 344 345 return 0; 346 } 347 348 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 349 { 350 struct drm_info_node *node = (struct drm_info_node *) m->private; 351 struct drm_device *dev = node->minor->dev; 352 unsigned long flags; 353 struct intel_crtc *crtc; 354 355 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 356 const char pipe = pipe_name(crtc->pipe); 357 const char plane = plane_name(crtc->plane); 358 struct intel_unpin_work *work; 359 360 spin_lock_irqsave(&dev->event_lock, flags); 361 work = crtc->unpin_work; 362 if (work == NULL) { 363 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 364 pipe, plane); 365 } else { 366 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 367 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 368 pipe, plane); 369 } else { 370 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 371 pipe, plane); 372 } 373 if (work->enable_stall_check) 374 seq_printf(m, "Stall check enabled, "); 375 else 376 seq_printf(m, "Stall check waiting for page flip ioctl, "); 377 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 378 379 if (work->old_fb_obj) { 380 struct drm_i915_gem_object *obj = work->old_fb_obj; 381 if (obj) 382 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 383 } 384 if (work->pending_flip_obj) { 385 struct drm_i915_gem_object *obj = work->pending_flip_obj; 386 if (obj) 387 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 388 } 389 } 390 spin_unlock_irqrestore(&dev->event_lock, flags); 391 } 392 393 return 0; 394 } 395 396 static int i915_gem_request_info(struct seq_file *m, void *data) 397 { 398 struct drm_info_node *node = (struct drm_info_node *) m->private; 399 struct drm_device *dev = node->minor->dev; 400 drm_i915_private_t *dev_priv = dev->dev_private; 401 struct intel_ring_buffer *ring; 402 struct drm_i915_gem_request *gem_request; 403 int ret, count, i; 404 405 ret = mutex_lock_interruptible(&dev->struct_mutex); 406 if (ret) 407 return ret; 408 409 count = 0; 410 for_each_ring(ring, dev_priv, i) { 411 if (list_empty(&ring->request_list)) 412 continue; 413 414 seq_printf(m, "%s requests:\n", ring->name); 415 list_for_each_entry(gem_request, 416 &ring->request_list, 417 list) { 418 seq_printf(m, " %d @ %d\n", 419 gem_request->seqno, 420 (int) (jiffies - gem_request->emitted_jiffies)); 421 } 422 count++; 423 } 424 mutex_unlock(&dev->struct_mutex); 425 426 if (count == 0) 427 seq_printf(m, "No requests\n"); 428 429 return 0; 430 } 431 432 static void i915_ring_seqno_info(struct seq_file *m, 433 struct intel_ring_buffer *ring) 434 { 435 if (ring->get_seqno) { 436 seq_printf(m, "Current sequence (%s): %u\n", 437 ring->name, ring->get_seqno(ring, false)); 438 } 439 } 440 441 static int i915_gem_seqno_info(struct seq_file *m, void *data) 442 { 443 struct drm_info_node *node = (struct drm_info_node *) m->private; 444 struct drm_device *dev = node->minor->dev; 445 drm_i915_private_t *dev_priv = dev->dev_private; 446 struct intel_ring_buffer *ring; 447 int ret, i; 448 449 ret = mutex_lock_interruptible(&dev->struct_mutex); 450 if (ret) 451 return ret; 452 453 for_each_ring(ring, dev_priv, i) 454 i915_ring_seqno_info(m, ring); 455 456 mutex_unlock(&dev->struct_mutex); 457 458 return 0; 459 } 460 461 462 static int i915_interrupt_info(struct seq_file *m, void *data) 463 { 464 struct drm_info_node *node = (struct drm_info_node *) m->private; 465 struct drm_device *dev = node->minor->dev; 466 drm_i915_private_t *dev_priv = dev->dev_private; 467 struct intel_ring_buffer *ring; 468 int ret, i, pipe; 469 470 ret = mutex_lock_interruptible(&dev->struct_mutex); 471 if (ret) 472 return ret; 473 474 if (IS_VALLEYVIEW(dev)) { 475 seq_printf(m, "Display IER:\t%08x\n", 476 I915_READ(VLV_IER)); 477 seq_printf(m, "Display IIR:\t%08x\n", 478 I915_READ(VLV_IIR)); 479 seq_printf(m, "Display IIR_RW:\t%08x\n", 480 I915_READ(VLV_IIR_RW)); 481 seq_printf(m, "Display IMR:\t%08x\n", 482 I915_READ(VLV_IMR)); 483 for_each_pipe(pipe) 484 seq_printf(m, "Pipe %c stat:\t%08x\n", 485 pipe_name(pipe), 486 I915_READ(PIPESTAT(pipe))); 487 488 seq_printf(m, "Master IER:\t%08x\n", 489 I915_READ(VLV_MASTER_IER)); 490 491 seq_printf(m, "Render IER:\t%08x\n", 492 I915_READ(GTIER)); 493 seq_printf(m, "Render IIR:\t%08x\n", 494 I915_READ(GTIIR)); 495 seq_printf(m, "Render IMR:\t%08x\n", 496 I915_READ(GTIMR)); 497 498 seq_printf(m, "PM IER:\t\t%08x\n", 499 I915_READ(GEN6_PMIER)); 500 seq_printf(m, "PM IIR:\t\t%08x\n", 501 I915_READ(GEN6_PMIIR)); 502 seq_printf(m, "PM IMR:\t\t%08x\n", 503 I915_READ(GEN6_PMIMR)); 504 505 seq_printf(m, "Port hotplug:\t%08x\n", 506 I915_READ(PORT_HOTPLUG_EN)); 507 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 508 I915_READ(VLV_DPFLIPSTAT)); 509 seq_printf(m, "DPINVGTT:\t%08x\n", 510 I915_READ(DPINVGTT)); 511 512 } else if (!HAS_PCH_SPLIT(dev)) { 513 seq_printf(m, "Interrupt enable: %08x\n", 514 I915_READ(IER)); 515 seq_printf(m, "Interrupt identity: %08x\n", 516 I915_READ(IIR)); 517 seq_printf(m, "Interrupt mask: %08x\n", 518 I915_READ(IMR)); 519 for_each_pipe(pipe) 520 seq_printf(m, "Pipe %c stat: %08x\n", 521 pipe_name(pipe), 522 I915_READ(PIPESTAT(pipe))); 523 } else { 524 seq_printf(m, "North Display Interrupt enable: %08x\n", 525 I915_READ(DEIER)); 526 seq_printf(m, "North Display Interrupt identity: %08x\n", 527 I915_READ(DEIIR)); 528 seq_printf(m, "North Display Interrupt mask: %08x\n", 529 I915_READ(DEIMR)); 530 seq_printf(m, "South Display Interrupt enable: %08x\n", 531 I915_READ(SDEIER)); 532 seq_printf(m, "South Display Interrupt identity: %08x\n", 533 I915_READ(SDEIIR)); 534 seq_printf(m, "South Display Interrupt mask: %08x\n", 535 I915_READ(SDEIMR)); 536 seq_printf(m, "Graphics Interrupt enable: %08x\n", 537 I915_READ(GTIER)); 538 seq_printf(m, "Graphics Interrupt identity: %08x\n", 539 I915_READ(GTIIR)); 540 seq_printf(m, "Graphics Interrupt mask: %08x\n", 541 I915_READ(GTIMR)); 542 } 543 seq_printf(m, "Interrupts received: %d\n", 544 atomic_read(&dev_priv->irq_received)); 545 for_each_ring(ring, dev_priv, i) { 546 if (IS_GEN6(dev) || IS_GEN7(dev)) { 547 seq_printf(m, 548 "Graphics Interrupt mask (%s): %08x\n", 549 ring->name, I915_READ_IMR(ring)); 550 } 551 i915_ring_seqno_info(m, ring); 552 } 553 mutex_unlock(&dev->struct_mutex); 554 555 return 0; 556 } 557 558 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 559 { 560 struct drm_info_node *node = (struct drm_info_node *) m->private; 561 struct drm_device *dev = node->minor->dev; 562 drm_i915_private_t *dev_priv = dev->dev_private; 563 int i, ret; 564 565 ret = mutex_lock_interruptible(&dev->struct_mutex); 566 if (ret) 567 return ret; 568 569 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 570 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 571 for (i = 0; i < dev_priv->num_fence_regs; i++) { 572 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 573 574 seq_printf(m, "Fence %d, pin count = %d, object = ", 575 i, dev_priv->fence_regs[i].pin_count); 576 if (obj == NULL) 577 seq_printf(m, "unused"); 578 else 579 describe_obj(m, obj); 580 seq_printf(m, "\n"); 581 } 582 583 mutex_unlock(&dev->struct_mutex); 584 return 0; 585 } 586 587 static int i915_hws_info(struct seq_file *m, void *data) 588 { 589 struct drm_info_node *node = (struct drm_info_node *) m->private; 590 struct drm_device *dev = node->minor->dev; 591 drm_i915_private_t *dev_priv = dev->dev_private; 592 struct intel_ring_buffer *ring; 593 const u32 *hws; 594 int i; 595 596 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 597 hws = ring->status_page.page_addr; 598 if (hws == NULL) 599 return 0; 600 601 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 602 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 603 i * 4, 604 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 605 } 606 return 0; 607 } 608 609 static const char *ring_str(int ring) 610 { 611 switch (ring) { 612 case RCS: return "render"; 613 case VCS: return "bsd"; 614 case BCS: return "blt"; 615 case VECS: return "vebox"; 616 default: return ""; 617 } 618 } 619 620 static const char *pin_flag(int pinned) 621 { 622 if (pinned > 0) 623 return " P"; 624 else if (pinned < 0) 625 return " p"; 626 else 627 return ""; 628 } 629 630 static const char *tiling_flag(int tiling) 631 { 632 switch (tiling) { 633 default: 634 case I915_TILING_NONE: return ""; 635 case I915_TILING_X: return " X"; 636 case I915_TILING_Y: return " Y"; 637 } 638 } 639 640 static const char *dirty_flag(int dirty) 641 { 642 return dirty ? " dirty" : ""; 643 } 644 645 static const char *purgeable_flag(int purgeable) 646 { 647 return purgeable ? " purgeable" : ""; 648 } 649 650 static bool __i915_error_ok(struct drm_i915_error_state_buf *e) 651 { 652 653 if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) { 654 e->err = -ENOSPC; 655 return false; 656 } 657 658 if (e->bytes == e->size - 1 || e->err) 659 return false; 660 661 return true; 662 } 663 664 static bool __i915_error_seek(struct drm_i915_error_state_buf *e, 665 unsigned len) 666 { 667 if (e->pos + len <= e->start) { 668 e->pos += len; 669 return false; 670 } 671 672 /* First vsnprintf needs to fit in its entirety for memmove */ 673 if (len >= e->size) { 674 e->err = -EIO; 675 return false; 676 } 677 678 return true; 679 } 680 681 static void __i915_error_advance(struct drm_i915_error_state_buf *e, 682 unsigned len) 683 { 684 /* If this is first printf in this window, adjust it so that 685 * start position matches start of the buffer 686 */ 687 688 if (e->pos < e->start) { 689 const size_t off = e->start - e->pos; 690 691 /* Should not happen but be paranoid */ 692 if (off > len || e->bytes) { 693 e->err = -EIO; 694 return; 695 } 696 697 memmove(e->buf, e->buf + off, len - off); 698 e->bytes = len - off; 699 e->pos = e->start; 700 return; 701 } 702 703 e->bytes += len; 704 e->pos += len; 705 } 706 707 static void i915_error_vprintf(struct drm_i915_error_state_buf *e, 708 const char *f, va_list args) 709 { 710 unsigned len; 711 712 if (!__i915_error_ok(e)) 713 return; 714 715 /* Seek the first printf which is hits start position */ 716 if (e->pos < e->start) { 717 len = vsnprintf(NULL, 0, f, args); 718 if (!__i915_error_seek(e, len)) 719 return; 720 } 721 722 len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args); 723 if (len >= e->size - e->bytes) 724 len = e->size - e->bytes - 1; 725 726 __i915_error_advance(e, len); 727 } 728 729 static void i915_error_puts(struct drm_i915_error_state_buf *e, 730 const char *str) 731 { 732 unsigned len; 733 734 if (!__i915_error_ok(e)) 735 return; 736 737 len = strlen(str); 738 739 /* Seek the first printf which is hits start position */ 740 if (e->pos < e->start) { 741 if (!__i915_error_seek(e, len)) 742 return; 743 } 744 745 if (len >= e->size - e->bytes) 746 len = e->size - e->bytes - 1; 747 memcpy(e->buf + e->bytes, str, len); 748 749 __i915_error_advance(e, len); 750 } 751 752 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 753 { 754 va_list args; 755 756 va_start(args, f); 757 i915_error_vprintf(e, f, args); 758 va_end(args); 759 } 760 761 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 762 #define err_puts(e, s) i915_error_puts(e, s) 763 764 static void print_error_buffers(struct drm_i915_error_state_buf *m, 765 const char *name, 766 struct drm_i915_error_buffer *err, 767 int count) 768 { 769 err_printf(m, "%s [%d]:\n", name, count); 770 771 while (count--) { 772 err_printf(m, " %08x %8u %02x %02x %x %x", 773 err->gtt_offset, 774 err->size, 775 err->read_domains, 776 err->write_domain, 777 err->rseqno, err->wseqno); 778 err_puts(m, pin_flag(err->pinned)); 779 err_puts(m, tiling_flag(err->tiling)); 780 err_puts(m, dirty_flag(err->dirty)); 781 err_puts(m, purgeable_flag(err->purgeable)); 782 err_puts(m, err->ring != -1 ? " " : ""); 783 err_puts(m, ring_str(err->ring)); 784 err_puts(m, cache_level_str(err->cache_level)); 785 786 if (err->name) 787 err_printf(m, " (name: %d)", err->name); 788 if (err->fence_reg != I915_FENCE_REG_NONE) 789 err_printf(m, " (fence: %d)", err->fence_reg); 790 791 err_puts(m, "\n"); 792 err++; 793 } 794 } 795 796 static void i915_ring_error_state(struct drm_i915_error_state_buf *m, 797 struct drm_device *dev, 798 struct drm_i915_error_state *error, 799 unsigned ring) 800 { 801 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ 802 err_printf(m, "%s command stream:\n", ring_str(ring)); 803 err_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 804 err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 805 err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); 806 err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 807 err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 808 err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 809 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 810 if (ring == RCS && INTEL_INFO(dev)->gen >= 4) 811 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); 812 813 if (INTEL_INFO(dev)->gen >= 4) 814 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 815 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 816 err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); 817 if (INTEL_INFO(dev)->gen >= 6) { 818 err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); 819 err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 820 err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", 821 error->semaphore_mboxes[ring][0], 822 error->semaphore_seqno[ring][0]); 823 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", 824 error->semaphore_mboxes[ring][1], 825 error->semaphore_seqno[ring][1]); 826 } 827 err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 828 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 829 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 830 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 831 } 832 833 struct i915_error_state_file_priv { 834 struct drm_device *dev; 835 struct drm_i915_error_state *error; 836 }; 837 838 839 static int i915_error_state(struct i915_error_state_file_priv *error_priv, 840 struct drm_i915_error_state_buf *m) 841 842 { 843 struct drm_device *dev = error_priv->dev; 844 drm_i915_private_t *dev_priv = dev->dev_private; 845 struct drm_i915_error_state *error = error_priv->error; 846 struct intel_ring_buffer *ring; 847 int i, j, page, offset, elt; 848 849 if (!error) { 850 err_printf(m, "no error state collected\n"); 851 return 0; 852 } 853 854 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 855 error->time.tv_usec); 856 err_printf(m, "Kernel: " UTS_RELEASE "\n"); 857 err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 858 err_printf(m, "EIR: 0x%08x\n", error->eir); 859 err_printf(m, "IER: 0x%08x\n", error->ier); 860 err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 861 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); 862 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); 863 err_printf(m, "CCID: 0x%08x\n", error->ccid); 864 865 for (i = 0; i < dev_priv->num_fence_regs; i++) 866 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 867 868 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) 869 err_printf(m, " INSTDONE_%d: 0x%08x\n", i, 870 error->extra_instdone[i]); 871 872 if (INTEL_INFO(dev)->gen >= 6) { 873 err_printf(m, "ERROR: 0x%08x\n", error->error); 874 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 875 } 876 877 if (INTEL_INFO(dev)->gen == 7) 878 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 879 880 for_each_ring(ring, dev_priv, i) 881 i915_ring_error_state(m, dev, error, i); 882 883 if (error->active_bo) 884 print_error_buffers(m, "Active", 885 error->active_bo, 886 error->active_bo_count); 887 888 if (error->pinned_bo) 889 print_error_buffers(m, "Pinned", 890 error->pinned_bo, 891 error->pinned_bo_count); 892 893 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 894 struct drm_i915_error_object *obj; 895 896 if ((obj = error->ring[i].batchbuffer)) { 897 err_printf(m, "%s --- gtt_offset = 0x%08x\n", 898 dev_priv->ring[i].name, 899 obj->gtt_offset); 900 offset = 0; 901 for (page = 0; page < obj->page_count; page++) { 902 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 903 err_printf(m, "%08x : %08x\n", offset, 904 obj->pages[page][elt]); 905 offset += 4; 906 } 907 } 908 } 909 910 if (error->ring[i].num_requests) { 911 err_printf(m, "%s --- %d requests\n", 912 dev_priv->ring[i].name, 913 error->ring[i].num_requests); 914 for (j = 0; j < error->ring[i].num_requests; j++) { 915 err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", 916 error->ring[i].requests[j].seqno, 917 error->ring[i].requests[j].jiffies, 918 error->ring[i].requests[j].tail); 919 } 920 } 921 922 if ((obj = error->ring[i].ringbuffer)) { 923 err_printf(m, "%s --- ringbuffer = 0x%08x\n", 924 dev_priv->ring[i].name, 925 obj->gtt_offset); 926 offset = 0; 927 for (page = 0; page < obj->page_count; page++) { 928 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 929 err_printf(m, "%08x : %08x\n", 930 offset, 931 obj->pages[page][elt]); 932 offset += 4; 933 } 934 } 935 } 936 937 obj = error->ring[i].ctx; 938 if (obj) { 939 err_printf(m, "%s --- HW Context = 0x%08x\n", 940 dev_priv->ring[i].name, 941 obj->gtt_offset); 942 offset = 0; 943 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { 944 err_printf(m, "[%04x] %08x %08x %08x %08x\n", 945 offset, 946 obj->pages[0][elt], 947 obj->pages[0][elt+1], 948 obj->pages[0][elt+2], 949 obj->pages[0][elt+3]); 950 offset += 16; 951 } 952 } 953 } 954 955 if (error->overlay) 956 intel_overlay_print_error_state(m, error->overlay); 957 958 if (error->display) 959 intel_display_print_error_state(m, dev, error->display); 960 961 return 0; 962 } 963 964 static ssize_t 965 i915_error_state_write(struct file *filp, 966 const char __user *ubuf, 967 size_t cnt, 968 loff_t *ppos) 969 { 970 struct i915_error_state_file_priv *error_priv = filp->private_data; 971 struct drm_device *dev = error_priv->dev; 972 int ret; 973 974 DRM_DEBUG_DRIVER("Resetting error state\n"); 975 976 ret = mutex_lock_interruptible(&dev->struct_mutex); 977 if (ret) 978 return ret; 979 980 i915_destroy_error_state(dev); 981 mutex_unlock(&dev->struct_mutex); 982 983 return cnt; 984 } 985 986 static int i915_error_state_open(struct inode *inode, struct file *file) 987 { 988 struct drm_device *dev = inode->i_private; 989 drm_i915_private_t *dev_priv = dev->dev_private; 990 struct i915_error_state_file_priv *error_priv; 991 unsigned long flags; 992 993 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); 994 if (!error_priv) 995 return -ENOMEM; 996 997 error_priv->dev = dev; 998 999 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1000 error_priv->error = dev_priv->gpu_error.first_error; 1001 if (error_priv->error) 1002 kref_get(&error_priv->error->ref); 1003 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1004 1005 file->private_data = error_priv; 1006 1007 return 0; 1008 } 1009 1010 static int i915_error_state_release(struct inode *inode, struct file *file) 1011 { 1012 struct i915_error_state_file_priv *error_priv = file->private_data; 1013 1014 if (error_priv->error) 1015 kref_put(&error_priv->error->ref, i915_error_state_free); 1016 kfree(error_priv); 1017 1018 return 0; 1019 } 1020 1021 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, 1022 size_t count, loff_t *pos) 1023 { 1024 struct i915_error_state_file_priv *error_priv = file->private_data; 1025 struct drm_i915_error_state_buf error_str; 1026 loff_t tmp_pos = 0; 1027 ssize_t ret_count = 0; 1028 int ret = 0; 1029 1030 memset(&error_str, 0, sizeof(error_str)); 1031 1032 /* We need to have enough room to store any i915_error_state printf 1033 * so that we can move it to start position. 1034 */ 1035 error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE; 1036 error_str.buf = kmalloc(error_str.size, 1037 GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN); 1038 1039 if (error_str.buf == NULL) { 1040 error_str.size = PAGE_SIZE; 1041 error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY); 1042 } 1043 1044 if (error_str.buf == NULL) { 1045 error_str.size = 128; 1046 error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY); 1047 } 1048 1049 if (error_str.buf == NULL) 1050 return -ENOMEM; 1051 1052 error_str.start = *pos; 1053 1054 ret = i915_error_state(error_priv, &error_str); 1055 if (ret) 1056 goto out; 1057 1058 if (error_str.bytes == 0 && error_str.err) { 1059 ret = error_str.err; 1060 goto out; 1061 } 1062 1063 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos, 1064 error_str.buf, 1065 error_str.bytes); 1066 1067 if (ret_count < 0) 1068 ret = ret_count; 1069 else 1070 *pos = error_str.start + ret_count; 1071 out: 1072 kfree(error_str.buf); 1073 return ret ?: ret_count; 1074 } 1075 1076 static const struct file_operations i915_error_state_fops = { 1077 .owner = THIS_MODULE, 1078 .open = i915_error_state_open, 1079 .read = i915_error_state_read, 1080 .write = i915_error_state_write, 1081 .llseek = default_llseek, 1082 .release = i915_error_state_release, 1083 }; 1084 1085 static int 1086 i915_next_seqno_get(void *data, u64 *val) 1087 { 1088 struct drm_device *dev = data; 1089 drm_i915_private_t *dev_priv = dev->dev_private; 1090 int ret; 1091 1092 ret = mutex_lock_interruptible(&dev->struct_mutex); 1093 if (ret) 1094 return ret; 1095 1096 *val = dev_priv->next_seqno; 1097 mutex_unlock(&dev->struct_mutex); 1098 1099 return 0; 1100 } 1101 1102 static int 1103 i915_next_seqno_set(void *data, u64 val) 1104 { 1105 struct drm_device *dev = data; 1106 int ret; 1107 1108 ret = mutex_lock_interruptible(&dev->struct_mutex); 1109 if (ret) 1110 return ret; 1111 1112 ret = i915_gem_set_seqno(dev, val); 1113 mutex_unlock(&dev->struct_mutex); 1114 1115 return ret; 1116 } 1117 1118 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, 1119 i915_next_seqno_get, i915_next_seqno_set, 1120 "0x%llx\n"); 1121 1122 static int i915_rstdby_delays(struct seq_file *m, void *unused) 1123 { 1124 struct drm_info_node *node = (struct drm_info_node *) m->private; 1125 struct drm_device *dev = node->minor->dev; 1126 drm_i915_private_t *dev_priv = dev->dev_private; 1127 u16 crstanddelay; 1128 int ret; 1129 1130 ret = mutex_lock_interruptible(&dev->struct_mutex); 1131 if (ret) 1132 return ret; 1133 1134 crstanddelay = I915_READ16(CRSTANDVID); 1135 1136 mutex_unlock(&dev->struct_mutex); 1137 1138 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 1139 1140 return 0; 1141 } 1142 1143 static int i915_cur_delayinfo(struct seq_file *m, void *unused) 1144 { 1145 struct drm_info_node *node = (struct drm_info_node *) m->private; 1146 struct drm_device *dev = node->minor->dev; 1147 drm_i915_private_t *dev_priv = dev->dev_private; 1148 int ret; 1149 1150 if (IS_GEN5(dev)) { 1151 u16 rgvswctl = I915_READ16(MEMSWCTL); 1152 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 1153 1154 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 1155 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 1156 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 1157 MEMSTAT_VID_SHIFT); 1158 seq_printf(m, "Current P-state: %d\n", 1159 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 1160 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { 1161 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 1162 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 1163 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 1164 u32 rpstat, cagf; 1165 u32 rpupei, rpcurup, rpprevup; 1166 u32 rpdownei, rpcurdown, rpprevdown; 1167 int max_freq; 1168 1169 /* RPSTAT1 is in the GT power well */ 1170 ret = mutex_lock_interruptible(&dev->struct_mutex); 1171 if (ret) 1172 return ret; 1173 1174 gen6_gt_force_wake_get(dev_priv); 1175 1176 rpstat = I915_READ(GEN6_RPSTAT1); 1177 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 1178 rpcurup = I915_READ(GEN6_RP_CUR_UP); 1179 rpprevup = I915_READ(GEN6_RP_PREV_UP); 1180 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 1181 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 1182 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 1183 if (IS_HASWELL(dev)) 1184 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 1185 else 1186 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 1187 cagf *= GT_FREQUENCY_MULTIPLIER; 1188 1189 gen6_gt_force_wake_put(dev_priv); 1190 mutex_unlock(&dev->struct_mutex); 1191 1192 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1193 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 1194 seq_printf(m, "Render p-state ratio: %d\n", 1195 (gt_perf_status & 0xff00) >> 8); 1196 seq_printf(m, "Render p-state VID: %d\n", 1197 gt_perf_status & 0xff); 1198 seq_printf(m, "Render p-state limit: %d\n", 1199 rp_state_limits & 0xff); 1200 seq_printf(m, "CAGF: %dMHz\n", cagf); 1201 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 1202 GEN6_CURICONT_MASK); 1203 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 1204 GEN6_CURBSYTAVG_MASK); 1205 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 1206 GEN6_CURBSYTAVG_MASK); 1207 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 1208 GEN6_CURIAVG_MASK); 1209 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 1210 GEN6_CURBSYTAVG_MASK); 1211 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 1212 GEN6_CURBSYTAVG_MASK); 1213 1214 max_freq = (rp_state_cap & 0xff0000) >> 16; 1215 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1216 max_freq * GT_FREQUENCY_MULTIPLIER); 1217 1218 max_freq = (rp_state_cap & 0xff00) >> 8; 1219 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1220 max_freq * GT_FREQUENCY_MULTIPLIER); 1221 1222 max_freq = rp_state_cap & 0xff; 1223 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1224 max_freq * GT_FREQUENCY_MULTIPLIER); 1225 1226 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1227 dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER); 1228 } else if (IS_VALLEYVIEW(dev)) { 1229 u32 freq_sts, val; 1230 1231 mutex_lock(&dev_priv->rps.hw_lock); 1232 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 1233 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 1234 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 1235 1236 val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1); 1237 seq_printf(m, "max GPU freq: %d MHz\n", 1238 vlv_gpu_freq(dev_priv->mem_freq, val)); 1239 1240 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM); 1241 seq_printf(m, "min GPU freq: %d MHz\n", 1242 vlv_gpu_freq(dev_priv->mem_freq, val)); 1243 1244 seq_printf(m, "current GPU freq: %d MHz\n", 1245 vlv_gpu_freq(dev_priv->mem_freq, 1246 (freq_sts >> 8) & 0xff)); 1247 mutex_unlock(&dev_priv->rps.hw_lock); 1248 } else { 1249 seq_printf(m, "no P-state info available\n"); 1250 } 1251 1252 return 0; 1253 } 1254 1255 static int i915_delayfreq_table(struct seq_file *m, void *unused) 1256 { 1257 struct drm_info_node *node = (struct drm_info_node *) m->private; 1258 struct drm_device *dev = node->minor->dev; 1259 drm_i915_private_t *dev_priv = dev->dev_private; 1260 u32 delayfreq; 1261 int ret, i; 1262 1263 ret = mutex_lock_interruptible(&dev->struct_mutex); 1264 if (ret) 1265 return ret; 1266 1267 for (i = 0; i < 16; i++) { 1268 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 1269 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 1270 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 1271 } 1272 1273 mutex_unlock(&dev->struct_mutex); 1274 1275 return 0; 1276 } 1277 1278 static inline int MAP_TO_MV(int map) 1279 { 1280 return 1250 - (map * 25); 1281 } 1282 1283 static int i915_inttoext_table(struct seq_file *m, void *unused) 1284 { 1285 struct drm_info_node *node = (struct drm_info_node *) m->private; 1286 struct drm_device *dev = node->minor->dev; 1287 drm_i915_private_t *dev_priv = dev->dev_private; 1288 u32 inttoext; 1289 int ret, i; 1290 1291 ret = mutex_lock_interruptible(&dev->struct_mutex); 1292 if (ret) 1293 return ret; 1294 1295 for (i = 1; i <= 32; i++) { 1296 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 1297 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 1298 } 1299 1300 mutex_unlock(&dev->struct_mutex); 1301 1302 return 0; 1303 } 1304 1305 static int ironlake_drpc_info(struct seq_file *m) 1306 { 1307 struct drm_info_node *node = (struct drm_info_node *) m->private; 1308 struct drm_device *dev = node->minor->dev; 1309 drm_i915_private_t *dev_priv = dev->dev_private; 1310 u32 rgvmodectl, rstdbyctl; 1311 u16 crstandvid; 1312 int ret; 1313 1314 ret = mutex_lock_interruptible(&dev->struct_mutex); 1315 if (ret) 1316 return ret; 1317 1318 rgvmodectl = I915_READ(MEMMODECTL); 1319 rstdbyctl = I915_READ(RSTDBYCTL); 1320 crstandvid = I915_READ16(CRSTANDVID); 1321 1322 mutex_unlock(&dev->struct_mutex); 1323 1324 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1325 "yes" : "no"); 1326 seq_printf(m, "Boost freq: %d\n", 1327 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1328 MEMMODE_BOOST_FREQ_SHIFT); 1329 seq_printf(m, "HW control enabled: %s\n", 1330 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 1331 seq_printf(m, "SW control enabled: %s\n", 1332 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 1333 seq_printf(m, "Gated voltage change: %s\n", 1334 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 1335 seq_printf(m, "Starting frequency: P%d\n", 1336 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1337 seq_printf(m, "Max P-state: P%d\n", 1338 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1339 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1340 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1341 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1342 seq_printf(m, "Render standby enabled: %s\n", 1343 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1344 seq_printf(m, "Current RS state: "); 1345 switch (rstdbyctl & RSX_STATUS_MASK) { 1346 case RSX_STATUS_ON: 1347 seq_printf(m, "on\n"); 1348 break; 1349 case RSX_STATUS_RC1: 1350 seq_printf(m, "RC1\n"); 1351 break; 1352 case RSX_STATUS_RC1E: 1353 seq_printf(m, "RC1E\n"); 1354 break; 1355 case RSX_STATUS_RS1: 1356 seq_printf(m, "RS1\n"); 1357 break; 1358 case RSX_STATUS_RS2: 1359 seq_printf(m, "RS2 (RC6)\n"); 1360 break; 1361 case RSX_STATUS_RS3: 1362 seq_printf(m, "RC3 (RC6+)\n"); 1363 break; 1364 default: 1365 seq_printf(m, "unknown\n"); 1366 break; 1367 } 1368 1369 return 0; 1370 } 1371 1372 static int gen6_drpc_info(struct seq_file *m) 1373 { 1374 1375 struct drm_info_node *node = (struct drm_info_node *) m->private; 1376 struct drm_device *dev = node->minor->dev; 1377 struct drm_i915_private *dev_priv = dev->dev_private; 1378 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1379 unsigned forcewake_count; 1380 int count=0, ret; 1381 1382 1383 ret = mutex_lock_interruptible(&dev->struct_mutex); 1384 if (ret) 1385 return ret; 1386 1387 spin_lock_irq(&dev_priv->gt_lock); 1388 forcewake_count = dev_priv->forcewake_count; 1389 spin_unlock_irq(&dev_priv->gt_lock); 1390 1391 if (forcewake_count) { 1392 seq_printf(m, "RC information inaccurate because somebody " 1393 "holds a forcewake reference \n"); 1394 } else { 1395 /* NB: we cannot use forcewake, else we read the wrong values */ 1396 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1397 udelay(10); 1398 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1399 } 1400 1401 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1402 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); 1403 1404 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1405 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1406 mutex_unlock(&dev->struct_mutex); 1407 mutex_lock(&dev_priv->rps.hw_lock); 1408 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1409 mutex_unlock(&dev_priv->rps.hw_lock); 1410 1411 seq_printf(m, "Video Turbo Mode: %s\n", 1412 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1413 seq_printf(m, "HW control enabled: %s\n", 1414 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1415 seq_printf(m, "SW control enabled: %s\n", 1416 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1417 GEN6_RP_MEDIA_SW_MODE)); 1418 seq_printf(m, "RC1e Enabled: %s\n", 1419 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1420 seq_printf(m, "RC6 Enabled: %s\n", 1421 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1422 seq_printf(m, "Deep RC6 Enabled: %s\n", 1423 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1424 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1425 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1426 seq_printf(m, "Current RC state: "); 1427 switch (gt_core_status & GEN6_RCn_MASK) { 1428 case GEN6_RC0: 1429 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1430 seq_printf(m, "Core Power Down\n"); 1431 else 1432 seq_printf(m, "on\n"); 1433 break; 1434 case GEN6_RC3: 1435 seq_printf(m, "RC3\n"); 1436 break; 1437 case GEN6_RC6: 1438 seq_printf(m, "RC6\n"); 1439 break; 1440 case GEN6_RC7: 1441 seq_printf(m, "RC7\n"); 1442 break; 1443 default: 1444 seq_printf(m, "Unknown\n"); 1445 break; 1446 } 1447 1448 seq_printf(m, "Core Power Down: %s\n", 1449 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1450 1451 /* Not exactly sure what this is */ 1452 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1453 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1454 seq_printf(m, "RC6 residency since boot: %u\n", 1455 I915_READ(GEN6_GT_GFX_RC6)); 1456 seq_printf(m, "RC6+ residency since boot: %u\n", 1457 I915_READ(GEN6_GT_GFX_RC6p)); 1458 seq_printf(m, "RC6++ residency since boot: %u\n", 1459 I915_READ(GEN6_GT_GFX_RC6pp)); 1460 1461 seq_printf(m, "RC6 voltage: %dmV\n", 1462 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1463 seq_printf(m, "RC6+ voltage: %dmV\n", 1464 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1465 seq_printf(m, "RC6++ voltage: %dmV\n", 1466 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1467 return 0; 1468 } 1469 1470 static int i915_drpc_info(struct seq_file *m, void *unused) 1471 { 1472 struct drm_info_node *node = (struct drm_info_node *) m->private; 1473 struct drm_device *dev = node->minor->dev; 1474 1475 if (IS_GEN6(dev) || IS_GEN7(dev)) 1476 return gen6_drpc_info(m); 1477 else 1478 return ironlake_drpc_info(m); 1479 } 1480 1481 static int i915_fbc_status(struct seq_file *m, void *unused) 1482 { 1483 struct drm_info_node *node = (struct drm_info_node *) m->private; 1484 struct drm_device *dev = node->minor->dev; 1485 drm_i915_private_t *dev_priv = dev->dev_private; 1486 1487 if (!I915_HAS_FBC(dev)) { 1488 seq_printf(m, "FBC unsupported on this chipset\n"); 1489 return 0; 1490 } 1491 1492 if (intel_fbc_enabled(dev)) { 1493 seq_printf(m, "FBC enabled\n"); 1494 } else { 1495 seq_printf(m, "FBC disabled: "); 1496 switch (dev_priv->no_fbc_reason) { 1497 case FBC_NO_OUTPUT: 1498 seq_printf(m, "no outputs"); 1499 break; 1500 case FBC_STOLEN_TOO_SMALL: 1501 seq_printf(m, "not enough stolen memory"); 1502 break; 1503 case FBC_UNSUPPORTED_MODE: 1504 seq_printf(m, "mode not supported"); 1505 break; 1506 case FBC_MODE_TOO_LARGE: 1507 seq_printf(m, "mode too large"); 1508 break; 1509 case FBC_BAD_PLANE: 1510 seq_printf(m, "FBC unsupported on plane"); 1511 break; 1512 case FBC_NOT_TILED: 1513 seq_printf(m, "scanout buffer not tiled"); 1514 break; 1515 case FBC_MULTIPLE_PIPES: 1516 seq_printf(m, "multiple pipes are enabled"); 1517 break; 1518 case FBC_MODULE_PARAM: 1519 seq_printf(m, "disabled per module param (default off)"); 1520 break; 1521 default: 1522 seq_printf(m, "unknown reason"); 1523 } 1524 seq_printf(m, "\n"); 1525 } 1526 return 0; 1527 } 1528 1529 static int i915_ips_status(struct seq_file *m, void *unused) 1530 { 1531 struct drm_info_node *node = (struct drm_info_node *) m->private; 1532 struct drm_device *dev = node->minor->dev; 1533 struct drm_i915_private *dev_priv = dev->dev_private; 1534 1535 if (!HAS_IPS(dev)) { 1536 seq_puts(m, "not supported\n"); 1537 return 0; 1538 } 1539 1540 if (I915_READ(IPS_CTL) & IPS_ENABLE) 1541 seq_puts(m, "enabled\n"); 1542 else 1543 seq_puts(m, "disabled\n"); 1544 1545 return 0; 1546 } 1547 1548 static int i915_sr_status(struct seq_file *m, void *unused) 1549 { 1550 struct drm_info_node *node = (struct drm_info_node *) m->private; 1551 struct drm_device *dev = node->minor->dev; 1552 drm_i915_private_t *dev_priv = dev->dev_private; 1553 bool sr_enabled = false; 1554 1555 if (HAS_PCH_SPLIT(dev)) 1556 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1557 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1558 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1559 else if (IS_I915GM(dev)) 1560 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1561 else if (IS_PINEVIEW(dev)) 1562 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1563 1564 seq_printf(m, "self-refresh: %s\n", 1565 sr_enabled ? "enabled" : "disabled"); 1566 1567 return 0; 1568 } 1569 1570 static int i915_emon_status(struct seq_file *m, void *unused) 1571 { 1572 struct drm_info_node *node = (struct drm_info_node *) m->private; 1573 struct drm_device *dev = node->minor->dev; 1574 drm_i915_private_t *dev_priv = dev->dev_private; 1575 unsigned long temp, chipset, gfx; 1576 int ret; 1577 1578 if (!IS_GEN5(dev)) 1579 return -ENODEV; 1580 1581 ret = mutex_lock_interruptible(&dev->struct_mutex); 1582 if (ret) 1583 return ret; 1584 1585 temp = i915_mch_val(dev_priv); 1586 chipset = i915_chipset_val(dev_priv); 1587 gfx = i915_gfx_val(dev_priv); 1588 mutex_unlock(&dev->struct_mutex); 1589 1590 seq_printf(m, "GMCH temp: %ld\n", temp); 1591 seq_printf(m, "Chipset power: %ld\n", chipset); 1592 seq_printf(m, "GFX power: %ld\n", gfx); 1593 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1594 1595 return 0; 1596 } 1597 1598 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1599 { 1600 struct drm_info_node *node = (struct drm_info_node *) m->private; 1601 struct drm_device *dev = node->minor->dev; 1602 drm_i915_private_t *dev_priv = dev->dev_private; 1603 int ret; 1604 int gpu_freq, ia_freq; 1605 1606 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1607 seq_printf(m, "unsupported on this chipset\n"); 1608 return 0; 1609 } 1610 1611 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1612 if (ret) 1613 return ret; 1614 1615 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1616 1617 for (gpu_freq = dev_priv->rps.min_delay; 1618 gpu_freq <= dev_priv->rps.max_delay; 1619 gpu_freq++) { 1620 ia_freq = gpu_freq; 1621 sandybridge_pcode_read(dev_priv, 1622 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1623 &ia_freq); 1624 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1625 gpu_freq * GT_FREQUENCY_MULTIPLIER, 1626 ((ia_freq >> 0) & 0xff) * 100, 1627 ((ia_freq >> 8) & 0xff) * 100); 1628 } 1629 1630 mutex_unlock(&dev_priv->rps.hw_lock); 1631 1632 return 0; 1633 } 1634 1635 static int i915_gfxec(struct seq_file *m, void *unused) 1636 { 1637 struct drm_info_node *node = (struct drm_info_node *) m->private; 1638 struct drm_device *dev = node->minor->dev; 1639 drm_i915_private_t *dev_priv = dev->dev_private; 1640 int ret; 1641 1642 ret = mutex_lock_interruptible(&dev->struct_mutex); 1643 if (ret) 1644 return ret; 1645 1646 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1647 1648 mutex_unlock(&dev->struct_mutex); 1649 1650 return 0; 1651 } 1652 1653 static int i915_opregion(struct seq_file *m, void *unused) 1654 { 1655 struct drm_info_node *node = (struct drm_info_node *) m->private; 1656 struct drm_device *dev = node->minor->dev; 1657 drm_i915_private_t *dev_priv = dev->dev_private; 1658 struct intel_opregion *opregion = &dev_priv->opregion; 1659 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); 1660 int ret; 1661 1662 if (data == NULL) 1663 return -ENOMEM; 1664 1665 ret = mutex_lock_interruptible(&dev->struct_mutex); 1666 if (ret) 1667 goto out; 1668 1669 if (opregion->header) { 1670 memcpy_fromio(data, opregion->header, OPREGION_SIZE); 1671 seq_write(m, data, OPREGION_SIZE); 1672 } 1673 1674 mutex_unlock(&dev->struct_mutex); 1675 1676 out: 1677 kfree(data); 1678 return 0; 1679 } 1680 1681 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1682 { 1683 struct drm_info_node *node = (struct drm_info_node *) m->private; 1684 struct drm_device *dev = node->minor->dev; 1685 drm_i915_private_t *dev_priv = dev->dev_private; 1686 struct intel_fbdev *ifbdev; 1687 struct intel_framebuffer *fb; 1688 int ret; 1689 1690 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1691 if (ret) 1692 return ret; 1693 1694 ifbdev = dev_priv->fbdev; 1695 fb = to_intel_framebuffer(ifbdev->helper.fb); 1696 1697 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1698 fb->base.width, 1699 fb->base.height, 1700 fb->base.depth, 1701 fb->base.bits_per_pixel, 1702 atomic_read(&fb->base.refcount.refcount)); 1703 describe_obj(m, fb->obj); 1704 seq_printf(m, "\n"); 1705 mutex_unlock(&dev->mode_config.mutex); 1706 1707 mutex_lock(&dev->mode_config.fb_lock); 1708 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1709 if (&fb->base == ifbdev->helper.fb) 1710 continue; 1711 1712 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1713 fb->base.width, 1714 fb->base.height, 1715 fb->base.depth, 1716 fb->base.bits_per_pixel, 1717 atomic_read(&fb->base.refcount.refcount)); 1718 describe_obj(m, fb->obj); 1719 seq_printf(m, "\n"); 1720 } 1721 mutex_unlock(&dev->mode_config.fb_lock); 1722 1723 return 0; 1724 } 1725 1726 static int i915_context_status(struct seq_file *m, void *unused) 1727 { 1728 struct drm_info_node *node = (struct drm_info_node *) m->private; 1729 struct drm_device *dev = node->minor->dev; 1730 drm_i915_private_t *dev_priv = dev->dev_private; 1731 struct intel_ring_buffer *ring; 1732 int ret, i; 1733 1734 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1735 if (ret) 1736 return ret; 1737 1738 if (dev_priv->ips.pwrctx) { 1739 seq_printf(m, "power context "); 1740 describe_obj(m, dev_priv->ips.pwrctx); 1741 seq_printf(m, "\n"); 1742 } 1743 1744 if (dev_priv->ips.renderctx) { 1745 seq_printf(m, "render context "); 1746 describe_obj(m, dev_priv->ips.renderctx); 1747 seq_printf(m, "\n"); 1748 } 1749 1750 for_each_ring(ring, dev_priv, i) { 1751 if (ring->default_context) { 1752 seq_printf(m, "HW default context %s ring ", ring->name); 1753 describe_obj(m, ring->default_context->obj); 1754 seq_printf(m, "\n"); 1755 } 1756 } 1757 1758 mutex_unlock(&dev->mode_config.mutex); 1759 1760 return 0; 1761 } 1762 1763 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) 1764 { 1765 struct drm_info_node *node = (struct drm_info_node *) m->private; 1766 struct drm_device *dev = node->minor->dev; 1767 struct drm_i915_private *dev_priv = dev->dev_private; 1768 unsigned forcewake_count; 1769 1770 spin_lock_irq(&dev_priv->gt_lock); 1771 forcewake_count = dev_priv->forcewake_count; 1772 spin_unlock_irq(&dev_priv->gt_lock); 1773 1774 seq_printf(m, "forcewake count = %u\n", forcewake_count); 1775 1776 return 0; 1777 } 1778 1779 static const char *swizzle_string(unsigned swizzle) 1780 { 1781 switch(swizzle) { 1782 case I915_BIT_6_SWIZZLE_NONE: 1783 return "none"; 1784 case I915_BIT_6_SWIZZLE_9: 1785 return "bit9"; 1786 case I915_BIT_6_SWIZZLE_9_10: 1787 return "bit9/bit10"; 1788 case I915_BIT_6_SWIZZLE_9_11: 1789 return "bit9/bit11"; 1790 case I915_BIT_6_SWIZZLE_9_10_11: 1791 return "bit9/bit10/bit11"; 1792 case I915_BIT_6_SWIZZLE_9_17: 1793 return "bit9/bit17"; 1794 case I915_BIT_6_SWIZZLE_9_10_17: 1795 return "bit9/bit10/bit17"; 1796 case I915_BIT_6_SWIZZLE_UNKNOWN: 1797 return "unknown"; 1798 } 1799 1800 return "bug"; 1801 } 1802 1803 static int i915_swizzle_info(struct seq_file *m, void *data) 1804 { 1805 struct drm_info_node *node = (struct drm_info_node *) m->private; 1806 struct drm_device *dev = node->minor->dev; 1807 struct drm_i915_private *dev_priv = dev->dev_private; 1808 int ret; 1809 1810 ret = mutex_lock_interruptible(&dev->struct_mutex); 1811 if (ret) 1812 return ret; 1813 1814 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1815 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1816 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1817 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1818 1819 if (IS_GEN3(dev) || IS_GEN4(dev)) { 1820 seq_printf(m, "DDC = 0x%08x\n", 1821 I915_READ(DCC)); 1822 seq_printf(m, "C0DRB3 = 0x%04x\n", 1823 I915_READ16(C0DRB3)); 1824 seq_printf(m, "C1DRB3 = 0x%04x\n", 1825 I915_READ16(C1DRB3)); 1826 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 1827 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1828 I915_READ(MAD_DIMM_C0)); 1829 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1830 I915_READ(MAD_DIMM_C1)); 1831 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1832 I915_READ(MAD_DIMM_C2)); 1833 seq_printf(m, "TILECTL = 0x%08x\n", 1834 I915_READ(TILECTL)); 1835 seq_printf(m, "ARB_MODE = 0x%08x\n", 1836 I915_READ(ARB_MODE)); 1837 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1838 I915_READ(DISP_ARB_CTL)); 1839 } 1840 mutex_unlock(&dev->struct_mutex); 1841 1842 return 0; 1843 } 1844 1845 static int i915_ppgtt_info(struct seq_file *m, void *data) 1846 { 1847 struct drm_info_node *node = (struct drm_info_node *) m->private; 1848 struct drm_device *dev = node->minor->dev; 1849 struct drm_i915_private *dev_priv = dev->dev_private; 1850 struct intel_ring_buffer *ring; 1851 int i, ret; 1852 1853 1854 ret = mutex_lock_interruptible(&dev->struct_mutex); 1855 if (ret) 1856 return ret; 1857 if (INTEL_INFO(dev)->gen == 6) 1858 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 1859 1860 for_each_ring(ring, dev_priv, i) { 1861 seq_printf(m, "%s\n", ring->name); 1862 if (INTEL_INFO(dev)->gen == 7) 1863 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 1864 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 1865 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 1866 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 1867 } 1868 if (dev_priv->mm.aliasing_ppgtt) { 1869 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1870 1871 seq_printf(m, "aliasing PPGTT:\n"); 1872 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1873 } 1874 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1875 mutex_unlock(&dev->struct_mutex); 1876 1877 return 0; 1878 } 1879 1880 static int i915_dpio_info(struct seq_file *m, void *data) 1881 { 1882 struct drm_info_node *node = (struct drm_info_node *) m->private; 1883 struct drm_device *dev = node->minor->dev; 1884 struct drm_i915_private *dev_priv = dev->dev_private; 1885 int ret; 1886 1887 1888 if (!IS_VALLEYVIEW(dev)) { 1889 seq_printf(m, "unsupported\n"); 1890 return 0; 1891 } 1892 1893 ret = mutex_lock_interruptible(&dev_priv->dpio_lock); 1894 if (ret) 1895 return ret; 1896 1897 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); 1898 1899 seq_printf(m, "DPIO_DIV_A: 0x%08x\n", 1900 vlv_dpio_read(dev_priv, _DPIO_DIV_A)); 1901 seq_printf(m, "DPIO_DIV_B: 0x%08x\n", 1902 vlv_dpio_read(dev_priv, _DPIO_DIV_B)); 1903 1904 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", 1905 vlv_dpio_read(dev_priv, _DPIO_REFSFR_A)); 1906 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", 1907 vlv_dpio_read(dev_priv, _DPIO_REFSFR_B)); 1908 1909 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", 1910 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); 1911 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1912 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); 1913 1914 seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n", 1915 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A)); 1916 seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n", 1917 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B)); 1918 1919 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1920 vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1921 1922 mutex_unlock(&dev_priv->dpio_lock); 1923 1924 return 0; 1925 } 1926 1927 static int 1928 i915_wedged_get(void *data, u64 *val) 1929 { 1930 struct drm_device *dev = data; 1931 drm_i915_private_t *dev_priv = dev->dev_private; 1932 1933 *val = atomic_read(&dev_priv->gpu_error.reset_counter); 1934 1935 return 0; 1936 } 1937 1938 static int 1939 i915_wedged_set(void *data, u64 val) 1940 { 1941 struct drm_device *dev = data; 1942 1943 DRM_INFO("Manually setting wedged to %llu\n", val); 1944 i915_handle_error(dev, val); 1945 1946 return 0; 1947 } 1948 1949 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 1950 i915_wedged_get, i915_wedged_set, 1951 "%llu\n"); 1952 1953 static int 1954 i915_ring_stop_get(void *data, u64 *val) 1955 { 1956 struct drm_device *dev = data; 1957 drm_i915_private_t *dev_priv = dev->dev_private; 1958 1959 *val = dev_priv->gpu_error.stop_rings; 1960 1961 return 0; 1962 } 1963 1964 static int 1965 i915_ring_stop_set(void *data, u64 val) 1966 { 1967 struct drm_device *dev = data; 1968 struct drm_i915_private *dev_priv = dev->dev_private; 1969 int ret; 1970 1971 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 1972 1973 ret = mutex_lock_interruptible(&dev->struct_mutex); 1974 if (ret) 1975 return ret; 1976 1977 dev_priv->gpu_error.stop_rings = val; 1978 mutex_unlock(&dev->struct_mutex); 1979 1980 return 0; 1981 } 1982 1983 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 1984 i915_ring_stop_get, i915_ring_stop_set, 1985 "0x%08llx\n"); 1986 1987 #define DROP_UNBOUND 0x1 1988 #define DROP_BOUND 0x2 1989 #define DROP_RETIRE 0x4 1990 #define DROP_ACTIVE 0x8 1991 #define DROP_ALL (DROP_UNBOUND | \ 1992 DROP_BOUND | \ 1993 DROP_RETIRE | \ 1994 DROP_ACTIVE) 1995 static int 1996 i915_drop_caches_get(void *data, u64 *val) 1997 { 1998 *val = DROP_ALL; 1999 2000 return 0; 2001 } 2002 2003 static int 2004 i915_drop_caches_set(void *data, u64 val) 2005 { 2006 struct drm_device *dev = data; 2007 struct drm_i915_private *dev_priv = dev->dev_private; 2008 struct drm_i915_gem_object *obj, *next; 2009 int ret; 2010 2011 DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); 2012 2013 /* No need to check and wait for gpu resets, only libdrm auto-restarts 2014 * on ioctls on -EAGAIN. */ 2015 ret = mutex_lock_interruptible(&dev->struct_mutex); 2016 if (ret) 2017 return ret; 2018 2019 if (val & DROP_ACTIVE) { 2020 ret = i915_gpu_idle(dev); 2021 if (ret) 2022 goto unlock; 2023 } 2024 2025 if (val & (DROP_RETIRE | DROP_ACTIVE)) 2026 i915_gem_retire_requests(dev); 2027 2028 if (val & DROP_BOUND) { 2029 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) 2030 if (obj->pin_count == 0) { 2031 ret = i915_gem_object_unbind(obj); 2032 if (ret) 2033 goto unlock; 2034 } 2035 } 2036 2037 if (val & DROP_UNBOUND) { 2038 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, 2039 global_list) 2040 if (obj->pages_pin_count == 0) { 2041 ret = i915_gem_object_put_pages(obj); 2042 if (ret) 2043 goto unlock; 2044 } 2045 } 2046 2047 unlock: 2048 mutex_unlock(&dev->struct_mutex); 2049 2050 return ret; 2051 } 2052 2053 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 2054 i915_drop_caches_get, i915_drop_caches_set, 2055 "0x%08llx\n"); 2056 2057 static int 2058 i915_max_freq_get(void *data, u64 *val) 2059 { 2060 struct drm_device *dev = data; 2061 drm_i915_private_t *dev_priv = dev->dev_private; 2062 int ret; 2063 2064 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2065 return -ENODEV; 2066 2067 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2068 if (ret) 2069 return ret; 2070 2071 if (IS_VALLEYVIEW(dev)) 2072 *val = vlv_gpu_freq(dev_priv->mem_freq, 2073 dev_priv->rps.max_delay); 2074 else 2075 *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; 2076 mutex_unlock(&dev_priv->rps.hw_lock); 2077 2078 return 0; 2079 } 2080 2081 static int 2082 i915_max_freq_set(void *data, u64 val) 2083 { 2084 struct drm_device *dev = data; 2085 struct drm_i915_private *dev_priv = dev->dev_private; 2086 int ret; 2087 2088 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2089 return -ENODEV; 2090 2091 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 2092 2093 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2094 if (ret) 2095 return ret; 2096 2097 /* 2098 * Turbo will still be enabled, but won't go above the set value. 2099 */ 2100 if (IS_VALLEYVIEW(dev)) { 2101 val = vlv_freq_opcode(dev_priv->mem_freq, val); 2102 dev_priv->rps.max_delay = val; 2103 gen6_set_rps(dev, val); 2104 } else { 2105 do_div(val, GT_FREQUENCY_MULTIPLIER); 2106 dev_priv->rps.max_delay = val; 2107 gen6_set_rps(dev, val); 2108 } 2109 2110 mutex_unlock(&dev_priv->rps.hw_lock); 2111 2112 return 0; 2113 } 2114 2115 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 2116 i915_max_freq_get, i915_max_freq_set, 2117 "%llu\n"); 2118 2119 static int 2120 i915_min_freq_get(void *data, u64 *val) 2121 { 2122 struct drm_device *dev = data; 2123 drm_i915_private_t *dev_priv = dev->dev_private; 2124 int ret; 2125 2126 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2127 return -ENODEV; 2128 2129 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2130 if (ret) 2131 return ret; 2132 2133 if (IS_VALLEYVIEW(dev)) 2134 *val = vlv_gpu_freq(dev_priv->mem_freq, 2135 dev_priv->rps.min_delay); 2136 else 2137 *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; 2138 mutex_unlock(&dev_priv->rps.hw_lock); 2139 2140 return 0; 2141 } 2142 2143 static int 2144 i915_min_freq_set(void *data, u64 val) 2145 { 2146 struct drm_device *dev = data; 2147 struct drm_i915_private *dev_priv = dev->dev_private; 2148 int ret; 2149 2150 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2151 return -ENODEV; 2152 2153 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 2154 2155 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 2156 if (ret) 2157 return ret; 2158 2159 /* 2160 * Turbo will still be enabled, but won't go below the set value. 2161 */ 2162 if (IS_VALLEYVIEW(dev)) { 2163 val = vlv_freq_opcode(dev_priv->mem_freq, val); 2164 dev_priv->rps.min_delay = val; 2165 valleyview_set_rps(dev, val); 2166 } else { 2167 do_div(val, GT_FREQUENCY_MULTIPLIER); 2168 dev_priv->rps.min_delay = val; 2169 gen6_set_rps(dev, val); 2170 } 2171 mutex_unlock(&dev_priv->rps.hw_lock); 2172 2173 return 0; 2174 } 2175 2176 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 2177 i915_min_freq_get, i915_min_freq_set, 2178 "%llu\n"); 2179 2180 static int 2181 i915_cache_sharing_get(void *data, u64 *val) 2182 { 2183 struct drm_device *dev = data; 2184 drm_i915_private_t *dev_priv = dev->dev_private; 2185 u32 snpcr; 2186 int ret; 2187 2188 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2189 return -ENODEV; 2190 2191 ret = mutex_lock_interruptible(&dev->struct_mutex); 2192 if (ret) 2193 return ret; 2194 2195 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 2196 mutex_unlock(&dev_priv->dev->struct_mutex); 2197 2198 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 2199 2200 return 0; 2201 } 2202 2203 static int 2204 i915_cache_sharing_set(void *data, u64 val) 2205 { 2206 struct drm_device *dev = data; 2207 struct drm_i915_private *dev_priv = dev->dev_private; 2208 u32 snpcr; 2209 2210 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 2211 return -ENODEV; 2212 2213 if (val > 3) 2214 return -EINVAL; 2215 2216 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 2217 2218 /* Update the cache sharing policy here as well */ 2219 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 2220 snpcr &= ~GEN6_MBC_SNPCR_MASK; 2221 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 2222 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 2223 2224 return 0; 2225 } 2226 2227 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 2228 i915_cache_sharing_get, i915_cache_sharing_set, 2229 "%llu\n"); 2230 2231 /* As the drm_debugfs_init() routines are called before dev->dev_private is 2232 * allocated we need to hook into the minor for release. */ 2233 static int 2234 drm_add_fake_info_node(struct drm_minor *minor, 2235 struct dentry *ent, 2236 const void *key) 2237 { 2238 struct drm_info_node *node; 2239 2240 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 2241 if (node == NULL) { 2242 debugfs_remove(ent); 2243 return -ENOMEM; 2244 } 2245 2246 node->minor = minor; 2247 node->dent = ent; 2248 node->info_ent = (void *) key; 2249 2250 mutex_lock(&minor->debugfs_lock); 2251 list_add(&node->list, &minor->debugfs_list); 2252 mutex_unlock(&minor->debugfs_lock); 2253 2254 return 0; 2255 } 2256 2257 static int i915_forcewake_open(struct inode *inode, struct file *file) 2258 { 2259 struct drm_device *dev = inode->i_private; 2260 struct drm_i915_private *dev_priv = dev->dev_private; 2261 2262 if (INTEL_INFO(dev)->gen < 6) 2263 return 0; 2264 2265 gen6_gt_force_wake_get(dev_priv); 2266 2267 return 0; 2268 } 2269 2270 static int i915_forcewake_release(struct inode *inode, struct file *file) 2271 { 2272 struct drm_device *dev = inode->i_private; 2273 struct drm_i915_private *dev_priv = dev->dev_private; 2274 2275 if (INTEL_INFO(dev)->gen < 6) 2276 return 0; 2277 2278 gen6_gt_force_wake_put(dev_priv); 2279 2280 return 0; 2281 } 2282 2283 static const struct file_operations i915_forcewake_fops = { 2284 .owner = THIS_MODULE, 2285 .open = i915_forcewake_open, 2286 .release = i915_forcewake_release, 2287 }; 2288 2289 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 2290 { 2291 struct drm_device *dev = minor->dev; 2292 struct dentry *ent; 2293 2294 ent = debugfs_create_file("i915_forcewake_user", 2295 S_IRUSR, 2296 root, dev, 2297 &i915_forcewake_fops); 2298 if (IS_ERR(ent)) 2299 return PTR_ERR(ent); 2300 2301 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 2302 } 2303 2304 static int i915_debugfs_create(struct dentry *root, 2305 struct drm_minor *minor, 2306 const char *name, 2307 const struct file_operations *fops) 2308 { 2309 struct drm_device *dev = minor->dev; 2310 struct dentry *ent; 2311 2312 ent = debugfs_create_file(name, 2313 S_IRUGO | S_IWUSR, 2314 root, dev, 2315 fops); 2316 if (IS_ERR(ent)) 2317 return PTR_ERR(ent); 2318 2319 return drm_add_fake_info_node(minor, ent, fops); 2320 } 2321 2322 static struct drm_info_list i915_debugfs_list[] = { 2323 {"i915_capabilities", i915_capabilities, 0}, 2324 {"i915_gem_objects", i915_gem_object_info, 0}, 2325 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 2326 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 2327 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 2328 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 2329 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 2330 {"i915_gem_request", i915_gem_request_info, 0}, 2331 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 2332 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 2333 {"i915_gem_interrupt", i915_interrupt_info, 0}, 2334 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 2335 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 2336 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 2337 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, 2338 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 2339 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 2340 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 2341 {"i915_inttoext_table", i915_inttoext_table, 0}, 2342 {"i915_drpc_info", i915_drpc_info, 0}, 2343 {"i915_emon_status", i915_emon_status, 0}, 2344 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 2345 {"i915_gfxec", i915_gfxec, 0}, 2346 {"i915_fbc_status", i915_fbc_status, 0}, 2347 {"i915_ips_status", i915_ips_status, 0}, 2348 {"i915_sr_status", i915_sr_status, 0}, 2349 {"i915_opregion", i915_opregion, 0}, 2350 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 2351 {"i915_context_status", i915_context_status, 0}, 2352 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 2353 {"i915_swizzle_info", i915_swizzle_info, 0}, 2354 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 2355 {"i915_dpio", i915_dpio_info, 0}, 2356 }; 2357 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 2358 2359 int i915_debugfs_init(struct drm_minor *minor) 2360 { 2361 int ret; 2362 2363 ret = i915_debugfs_create(minor->debugfs_root, minor, 2364 "i915_wedged", 2365 &i915_wedged_fops); 2366 if (ret) 2367 return ret; 2368 2369 ret = i915_forcewake_create(minor->debugfs_root, minor); 2370 if (ret) 2371 return ret; 2372 2373 ret = i915_debugfs_create(minor->debugfs_root, minor, 2374 "i915_max_freq", 2375 &i915_max_freq_fops); 2376 if (ret) 2377 return ret; 2378 2379 ret = i915_debugfs_create(minor->debugfs_root, minor, 2380 "i915_min_freq", 2381 &i915_min_freq_fops); 2382 if (ret) 2383 return ret; 2384 2385 ret = i915_debugfs_create(minor->debugfs_root, minor, 2386 "i915_cache_sharing", 2387 &i915_cache_sharing_fops); 2388 if (ret) 2389 return ret; 2390 2391 ret = i915_debugfs_create(minor->debugfs_root, minor, 2392 "i915_ring_stop", 2393 &i915_ring_stop_fops); 2394 if (ret) 2395 return ret; 2396 2397 ret = i915_debugfs_create(minor->debugfs_root, minor, 2398 "i915_gem_drop_caches", 2399 &i915_drop_caches_fops); 2400 if (ret) 2401 return ret; 2402 2403 ret = i915_debugfs_create(minor->debugfs_root, minor, 2404 "i915_error_state", 2405 &i915_error_state_fops); 2406 if (ret) 2407 return ret; 2408 2409 ret = i915_debugfs_create(minor->debugfs_root, minor, 2410 "i915_next_seqno", 2411 &i915_next_seqno_fops); 2412 if (ret) 2413 return ret; 2414 2415 return drm_debugfs_create_files(i915_debugfs_list, 2416 I915_DEBUGFS_ENTRIES, 2417 minor->debugfs_root, minor); 2418 } 2419 2420 void i915_debugfs_cleanup(struct drm_minor *minor) 2421 { 2422 drm_debugfs_remove_files(i915_debugfs_list, 2423 I915_DEBUGFS_ENTRIES, minor); 2424 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 2425 1, minor); 2426 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, 2427 1, minor); 2428 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, 2429 1, minor); 2430 drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops, 2431 1, minor); 2432 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, 2433 1, minor); 2434 drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops, 2435 1, minor); 2436 drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops, 2437 1, minor); 2438 drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops, 2439 1, minor); 2440 drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops, 2441 1, minor); 2442 } 2443 2444 #endif /* CONFIG_DEBUG_FS */ 2445