1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/debugfs.h> 31 #include <linux/slab.h> 32 #include "drmP.h" 33 #include "drm.h" 34 #include "intel_drv.h" 35 #include "intel_ringbuffer.h" 36 #include "i915_drm.h" 37 #include "i915_drv.h" 38 39 #define DRM_I915_RING_DEBUG 1 40 41 42 #if defined(CONFIG_DEBUG_FS) 43 44 enum { 45 ACTIVE_LIST, 46 FLUSHING_LIST, 47 INACTIVE_LIST, 48 PINNED_LIST, 49 DEFERRED_FREE_LIST, 50 }; 51 52 static const char *yesno(int v) 53 { 54 return v ? "yes" : "no"; 55 } 56 57 static int i915_capabilities(struct seq_file *m, void *data) 58 { 59 struct drm_info_node *node = (struct drm_info_node *) m->private; 60 struct drm_device *dev = node->minor->dev; 61 const struct intel_device_info *info = INTEL_INFO(dev); 62 63 seq_printf(m, "gen: %d\n", info->gen); 64 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 65 B(is_mobile); 66 B(is_i85x); 67 B(is_i915g); 68 B(is_i945gm); 69 B(is_g33); 70 B(need_gfx_hws); 71 B(is_g4x); 72 B(is_pineview); 73 B(is_broadwater); 74 B(is_crestline); 75 B(has_fbc); 76 B(has_pipe_cxsr); 77 B(has_hotplug); 78 B(cursor_needs_physical); 79 B(has_overlay); 80 B(overlay_needs_physical); 81 B(supports_tv); 82 B(has_bsd_ring); 83 B(has_blt_ring); 84 #undef B 85 86 return 0; 87 } 88 89 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 90 { 91 if (obj->user_pin_count > 0) 92 return "P"; 93 else if (obj->pin_count > 0) 94 return "p"; 95 else 96 return " "; 97 } 98 99 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 100 { 101 switch (obj->tiling_mode) { 102 default: 103 case I915_TILING_NONE: return " "; 104 case I915_TILING_X: return "X"; 105 case I915_TILING_Y: return "Y"; 106 } 107 } 108 109 static const char *cache_level_str(int type) 110 { 111 switch (type) { 112 case I915_CACHE_NONE: return " uncached"; 113 case I915_CACHE_LLC: return " snooped (LLC)"; 114 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 115 default: return ""; 116 } 117 } 118 119 static void 120 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 121 { 122 seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s", 123 &obj->base, 124 get_pin_flag(obj), 125 get_tiling_flag(obj), 126 obj->base.size, 127 obj->base.read_domains, 128 obj->base.write_domain, 129 obj->last_rendering_seqno, 130 obj->last_fenced_seqno, 131 cache_level_str(obj->cache_level), 132 obj->dirty ? " dirty" : "", 133 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 134 if (obj->base.name) 135 seq_printf(m, " (name: %d)", obj->base.name); 136 if (obj->fence_reg != I915_FENCE_REG_NONE) 137 seq_printf(m, " (fence: %d)", obj->fence_reg); 138 if (obj->gtt_space != NULL) 139 seq_printf(m, " (gtt offset: %08x, size: %08x)", 140 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 141 if (obj->pin_mappable || obj->fault_mappable) { 142 char s[3], *t = s; 143 if (obj->pin_mappable) 144 *t++ = 'p'; 145 if (obj->fault_mappable) 146 *t++ = 'f'; 147 *t = '\0'; 148 seq_printf(m, " (%s mappable)", s); 149 } 150 if (obj->ring != NULL) 151 seq_printf(m, " (%s)", obj->ring->name); 152 } 153 154 static int i915_gem_object_list_info(struct seq_file *m, void *data) 155 { 156 struct drm_info_node *node = (struct drm_info_node *) m->private; 157 uintptr_t list = (uintptr_t) node->info_ent->data; 158 struct list_head *head; 159 struct drm_device *dev = node->minor->dev; 160 drm_i915_private_t *dev_priv = dev->dev_private; 161 struct drm_i915_gem_object *obj; 162 size_t total_obj_size, total_gtt_size; 163 int count, ret; 164 165 ret = mutex_lock_interruptible(&dev->struct_mutex); 166 if (ret) 167 return ret; 168 169 switch (list) { 170 case ACTIVE_LIST: 171 seq_printf(m, "Active:\n"); 172 head = &dev_priv->mm.active_list; 173 break; 174 case INACTIVE_LIST: 175 seq_printf(m, "Inactive:\n"); 176 head = &dev_priv->mm.inactive_list; 177 break; 178 case PINNED_LIST: 179 seq_printf(m, "Pinned:\n"); 180 head = &dev_priv->mm.pinned_list; 181 break; 182 case FLUSHING_LIST: 183 seq_printf(m, "Flushing:\n"); 184 head = &dev_priv->mm.flushing_list; 185 break; 186 case DEFERRED_FREE_LIST: 187 seq_printf(m, "Deferred free:\n"); 188 head = &dev_priv->mm.deferred_free_list; 189 break; 190 default: 191 mutex_unlock(&dev->struct_mutex); 192 return -EINVAL; 193 } 194 195 total_obj_size = total_gtt_size = count = 0; 196 list_for_each_entry(obj, head, mm_list) { 197 seq_printf(m, " "); 198 describe_obj(m, obj); 199 seq_printf(m, "\n"); 200 total_obj_size += obj->base.size; 201 total_gtt_size += obj->gtt_space->size; 202 count++; 203 } 204 mutex_unlock(&dev->struct_mutex); 205 206 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 207 count, total_obj_size, total_gtt_size); 208 return 0; 209 } 210 211 #define count_objects(list, member) do { \ 212 list_for_each_entry(obj, list, member) { \ 213 size += obj->gtt_space->size; \ 214 ++count; \ 215 if (obj->map_and_fenceable) { \ 216 mappable_size += obj->gtt_space->size; \ 217 ++mappable_count; \ 218 } \ 219 } \ 220 } while(0) 221 222 static int i915_gem_object_info(struct seq_file *m, void* data) 223 { 224 struct drm_info_node *node = (struct drm_info_node *) m->private; 225 struct drm_device *dev = node->minor->dev; 226 struct drm_i915_private *dev_priv = dev->dev_private; 227 u32 count, mappable_count; 228 size_t size, mappable_size; 229 struct drm_i915_gem_object *obj; 230 int ret; 231 232 ret = mutex_lock_interruptible(&dev->struct_mutex); 233 if (ret) 234 return ret; 235 236 seq_printf(m, "%u objects, %zu bytes\n", 237 dev_priv->mm.object_count, 238 dev_priv->mm.object_memory); 239 240 size = count = mappable_size = mappable_count = 0; 241 count_objects(&dev_priv->mm.gtt_list, gtt_list); 242 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 243 count, mappable_count, size, mappable_size); 244 245 size = count = mappable_size = mappable_count = 0; 246 count_objects(&dev_priv->mm.active_list, mm_list); 247 count_objects(&dev_priv->mm.flushing_list, mm_list); 248 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 249 count, mappable_count, size, mappable_size); 250 251 size = count = mappable_size = mappable_count = 0; 252 count_objects(&dev_priv->mm.pinned_list, mm_list); 253 seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n", 254 count, mappable_count, size, mappable_size); 255 256 size = count = mappable_size = mappable_count = 0; 257 count_objects(&dev_priv->mm.inactive_list, mm_list); 258 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 259 count, mappable_count, size, mappable_size); 260 261 size = count = mappable_size = mappable_count = 0; 262 count_objects(&dev_priv->mm.deferred_free_list, mm_list); 263 seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n", 264 count, mappable_count, size, mappable_size); 265 266 size = count = mappable_size = mappable_count = 0; 267 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 268 if (obj->fault_mappable) { 269 size += obj->gtt_space->size; 270 ++count; 271 } 272 if (obj->pin_mappable) { 273 mappable_size += obj->gtt_space->size; 274 ++mappable_count; 275 } 276 } 277 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 278 mappable_count, mappable_size); 279 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 280 count, size); 281 282 seq_printf(m, "%zu [%zu] gtt total\n", 283 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); 284 285 mutex_unlock(&dev->struct_mutex); 286 287 return 0; 288 } 289 290 static int i915_gem_gtt_info(struct seq_file *m, void* data) 291 { 292 struct drm_info_node *node = (struct drm_info_node *) m->private; 293 struct drm_device *dev = node->minor->dev; 294 struct drm_i915_private *dev_priv = dev->dev_private; 295 struct drm_i915_gem_object *obj; 296 size_t total_obj_size, total_gtt_size; 297 int count, ret; 298 299 ret = mutex_lock_interruptible(&dev->struct_mutex); 300 if (ret) 301 return ret; 302 303 total_obj_size = total_gtt_size = count = 0; 304 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 305 seq_printf(m, " "); 306 describe_obj(m, obj); 307 seq_printf(m, "\n"); 308 total_obj_size += obj->base.size; 309 total_gtt_size += obj->gtt_space->size; 310 count++; 311 } 312 313 mutex_unlock(&dev->struct_mutex); 314 315 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 316 count, total_obj_size, total_gtt_size); 317 318 return 0; 319 } 320 321 322 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 323 { 324 struct drm_info_node *node = (struct drm_info_node *) m->private; 325 struct drm_device *dev = node->minor->dev; 326 unsigned long flags; 327 struct intel_crtc *crtc; 328 329 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 330 const char pipe = pipe_name(crtc->pipe); 331 const char plane = plane_name(crtc->plane); 332 struct intel_unpin_work *work; 333 334 spin_lock_irqsave(&dev->event_lock, flags); 335 work = crtc->unpin_work; 336 if (work == NULL) { 337 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 338 pipe, plane); 339 } else { 340 if (!work->pending) { 341 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 342 pipe, plane); 343 } else { 344 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 345 pipe, plane); 346 } 347 if (work->enable_stall_check) 348 seq_printf(m, "Stall check enabled, "); 349 else 350 seq_printf(m, "Stall check waiting for page flip ioctl, "); 351 seq_printf(m, "%d prepares\n", work->pending); 352 353 if (work->old_fb_obj) { 354 struct drm_i915_gem_object *obj = work->old_fb_obj; 355 if (obj) 356 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 357 } 358 if (work->pending_flip_obj) { 359 struct drm_i915_gem_object *obj = work->pending_flip_obj; 360 if (obj) 361 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 362 } 363 } 364 spin_unlock_irqrestore(&dev->event_lock, flags); 365 } 366 367 return 0; 368 } 369 370 static int i915_gem_request_info(struct seq_file *m, void *data) 371 { 372 struct drm_info_node *node = (struct drm_info_node *) m->private; 373 struct drm_device *dev = node->minor->dev; 374 drm_i915_private_t *dev_priv = dev->dev_private; 375 struct drm_i915_gem_request *gem_request; 376 int ret, count; 377 378 ret = mutex_lock_interruptible(&dev->struct_mutex); 379 if (ret) 380 return ret; 381 382 count = 0; 383 if (!list_empty(&dev_priv->ring[RCS].request_list)) { 384 seq_printf(m, "Render requests:\n"); 385 list_for_each_entry(gem_request, 386 &dev_priv->ring[RCS].request_list, 387 list) { 388 seq_printf(m, " %d @ %d\n", 389 gem_request->seqno, 390 (int) (jiffies - gem_request->emitted_jiffies)); 391 } 392 count++; 393 } 394 if (!list_empty(&dev_priv->ring[VCS].request_list)) { 395 seq_printf(m, "BSD requests:\n"); 396 list_for_each_entry(gem_request, 397 &dev_priv->ring[VCS].request_list, 398 list) { 399 seq_printf(m, " %d @ %d\n", 400 gem_request->seqno, 401 (int) (jiffies - gem_request->emitted_jiffies)); 402 } 403 count++; 404 } 405 if (!list_empty(&dev_priv->ring[BCS].request_list)) { 406 seq_printf(m, "BLT requests:\n"); 407 list_for_each_entry(gem_request, 408 &dev_priv->ring[BCS].request_list, 409 list) { 410 seq_printf(m, " %d @ %d\n", 411 gem_request->seqno, 412 (int) (jiffies - gem_request->emitted_jiffies)); 413 } 414 count++; 415 } 416 mutex_unlock(&dev->struct_mutex); 417 418 if (count == 0) 419 seq_printf(m, "No requests\n"); 420 421 return 0; 422 } 423 424 static void i915_ring_seqno_info(struct seq_file *m, 425 struct intel_ring_buffer *ring) 426 { 427 if (ring->get_seqno) { 428 seq_printf(m, "Current sequence (%s): %d\n", 429 ring->name, ring->get_seqno(ring)); 430 seq_printf(m, "Waiter sequence (%s): %d\n", 431 ring->name, ring->waiting_seqno); 432 seq_printf(m, "IRQ sequence (%s): %d\n", 433 ring->name, ring->irq_seqno); 434 } 435 } 436 437 static int i915_gem_seqno_info(struct seq_file *m, void *data) 438 { 439 struct drm_info_node *node = (struct drm_info_node *) m->private; 440 struct drm_device *dev = node->minor->dev; 441 drm_i915_private_t *dev_priv = dev->dev_private; 442 int ret, i; 443 444 ret = mutex_lock_interruptible(&dev->struct_mutex); 445 if (ret) 446 return ret; 447 448 for (i = 0; i < I915_NUM_RINGS; i++) 449 i915_ring_seqno_info(m, &dev_priv->ring[i]); 450 451 mutex_unlock(&dev->struct_mutex); 452 453 return 0; 454 } 455 456 457 static int i915_interrupt_info(struct seq_file *m, void *data) 458 { 459 struct drm_info_node *node = (struct drm_info_node *) m->private; 460 struct drm_device *dev = node->minor->dev; 461 drm_i915_private_t *dev_priv = dev->dev_private; 462 int ret, i, pipe; 463 464 ret = mutex_lock_interruptible(&dev->struct_mutex); 465 if (ret) 466 return ret; 467 468 if (!HAS_PCH_SPLIT(dev)) { 469 seq_printf(m, "Interrupt enable: %08x\n", 470 I915_READ(IER)); 471 seq_printf(m, "Interrupt identity: %08x\n", 472 I915_READ(IIR)); 473 seq_printf(m, "Interrupt mask: %08x\n", 474 I915_READ(IMR)); 475 for_each_pipe(pipe) 476 seq_printf(m, "Pipe %c stat: %08x\n", 477 pipe_name(pipe), 478 I915_READ(PIPESTAT(pipe))); 479 } else { 480 seq_printf(m, "North Display Interrupt enable: %08x\n", 481 I915_READ(DEIER)); 482 seq_printf(m, "North Display Interrupt identity: %08x\n", 483 I915_READ(DEIIR)); 484 seq_printf(m, "North Display Interrupt mask: %08x\n", 485 I915_READ(DEIMR)); 486 seq_printf(m, "South Display Interrupt enable: %08x\n", 487 I915_READ(SDEIER)); 488 seq_printf(m, "South Display Interrupt identity: %08x\n", 489 I915_READ(SDEIIR)); 490 seq_printf(m, "South Display Interrupt mask: %08x\n", 491 I915_READ(SDEIMR)); 492 seq_printf(m, "Graphics Interrupt enable: %08x\n", 493 I915_READ(GTIER)); 494 seq_printf(m, "Graphics Interrupt identity: %08x\n", 495 I915_READ(GTIIR)); 496 seq_printf(m, "Graphics Interrupt mask: %08x\n", 497 I915_READ(GTIMR)); 498 } 499 seq_printf(m, "Interrupts received: %d\n", 500 atomic_read(&dev_priv->irq_received)); 501 for (i = 0; i < I915_NUM_RINGS; i++) { 502 if (IS_GEN6(dev)) { 503 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", 504 dev_priv->ring[i].name, 505 I915_READ_IMR(&dev_priv->ring[i])); 506 } 507 i915_ring_seqno_info(m, &dev_priv->ring[i]); 508 } 509 mutex_unlock(&dev->struct_mutex); 510 511 return 0; 512 } 513 514 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 515 { 516 struct drm_info_node *node = (struct drm_info_node *) m->private; 517 struct drm_device *dev = node->minor->dev; 518 drm_i915_private_t *dev_priv = dev->dev_private; 519 int i, ret; 520 521 ret = mutex_lock_interruptible(&dev->struct_mutex); 522 if (ret) 523 return ret; 524 525 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 526 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 527 for (i = 0; i < dev_priv->num_fence_regs; i++) { 528 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 529 530 seq_printf(m, "Fenced object[%2d] = ", i); 531 if (obj == NULL) 532 seq_printf(m, "unused"); 533 else 534 describe_obj(m, obj); 535 seq_printf(m, "\n"); 536 } 537 538 mutex_unlock(&dev->struct_mutex); 539 return 0; 540 } 541 542 static int i915_hws_info(struct seq_file *m, void *data) 543 { 544 struct drm_info_node *node = (struct drm_info_node *) m->private; 545 struct drm_device *dev = node->minor->dev; 546 drm_i915_private_t *dev_priv = dev->dev_private; 547 struct intel_ring_buffer *ring; 548 const volatile u32 __iomem *hws; 549 int i; 550 551 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 552 hws = (volatile u32 __iomem *)ring->status_page.page_addr; 553 if (hws == NULL) 554 return 0; 555 556 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 557 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 558 i * 4, 559 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 560 } 561 return 0; 562 } 563 564 static void i915_dump_object(struct seq_file *m, 565 struct io_mapping *mapping, 566 struct drm_i915_gem_object *obj) 567 { 568 int page, page_count, i; 569 570 page_count = obj->base.size / PAGE_SIZE; 571 for (page = 0; page < page_count; page++) { 572 u32 *mem = io_mapping_map_wc(mapping, 573 obj->gtt_offset + page * PAGE_SIZE); 574 for (i = 0; i < PAGE_SIZE; i += 4) 575 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 576 io_mapping_unmap(mem); 577 } 578 } 579 580 static int i915_batchbuffer_info(struct seq_file *m, void *data) 581 { 582 struct drm_info_node *node = (struct drm_info_node *) m->private; 583 struct drm_device *dev = node->minor->dev; 584 drm_i915_private_t *dev_priv = dev->dev_private; 585 struct drm_i915_gem_object *obj; 586 int ret; 587 588 ret = mutex_lock_interruptible(&dev->struct_mutex); 589 if (ret) 590 return ret; 591 592 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 593 if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) { 594 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); 595 i915_dump_object(m, dev_priv->mm.gtt_mapping, obj); 596 } 597 } 598 599 mutex_unlock(&dev->struct_mutex); 600 return 0; 601 } 602 603 static int i915_ringbuffer_data(struct seq_file *m, void *data) 604 { 605 struct drm_info_node *node = (struct drm_info_node *) m->private; 606 struct drm_device *dev = node->minor->dev; 607 drm_i915_private_t *dev_priv = dev->dev_private; 608 struct intel_ring_buffer *ring; 609 int ret; 610 611 ret = mutex_lock_interruptible(&dev->struct_mutex); 612 if (ret) 613 return ret; 614 615 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 616 if (!ring->obj) { 617 seq_printf(m, "No ringbuffer setup\n"); 618 } else { 619 const u8 __iomem *virt = ring->virtual_start; 620 uint32_t off; 621 622 for (off = 0; off < ring->size; off += 4) { 623 uint32_t *ptr = (uint32_t *)(virt + off); 624 seq_printf(m, "%08x : %08x\n", off, *ptr); 625 } 626 } 627 mutex_unlock(&dev->struct_mutex); 628 629 return 0; 630 } 631 632 static int i915_ringbuffer_info(struct seq_file *m, void *data) 633 { 634 struct drm_info_node *node = (struct drm_info_node *) m->private; 635 struct drm_device *dev = node->minor->dev; 636 drm_i915_private_t *dev_priv = dev->dev_private; 637 struct intel_ring_buffer *ring; 638 639 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 640 if (ring->size == 0) 641 return 0; 642 643 seq_printf(m, "Ring %s:\n", ring->name); 644 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); 645 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); 646 seq_printf(m, " Size : %08x\n", ring->size); 647 seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring)); 648 seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring)); 649 if (IS_GEN6(dev)) { 650 seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring)); 651 seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring)); 652 } 653 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); 654 seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); 655 656 return 0; 657 } 658 659 static const char *ring_str(int ring) 660 { 661 switch (ring) { 662 case RING_RENDER: return " render"; 663 case RING_BSD: return " bsd"; 664 case RING_BLT: return " blt"; 665 default: return ""; 666 } 667 } 668 669 static const char *pin_flag(int pinned) 670 { 671 if (pinned > 0) 672 return " P"; 673 else if (pinned < 0) 674 return " p"; 675 else 676 return ""; 677 } 678 679 static const char *tiling_flag(int tiling) 680 { 681 switch (tiling) { 682 default: 683 case I915_TILING_NONE: return ""; 684 case I915_TILING_X: return " X"; 685 case I915_TILING_Y: return " Y"; 686 } 687 } 688 689 static const char *dirty_flag(int dirty) 690 { 691 return dirty ? " dirty" : ""; 692 } 693 694 static const char *purgeable_flag(int purgeable) 695 { 696 return purgeable ? " purgeable" : ""; 697 } 698 699 static void print_error_buffers(struct seq_file *m, 700 const char *name, 701 struct drm_i915_error_buffer *err, 702 int count) 703 { 704 seq_printf(m, "%s [%d]:\n", name, count); 705 706 while (count--) { 707 seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s", 708 err->gtt_offset, 709 err->size, 710 err->read_domains, 711 err->write_domain, 712 err->seqno, 713 pin_flag(err->pinned), 714 tiling_flag(err->tiling), 715 dirty_flag(err->dirty), 716 purgeable_flag(err->purgeable), 717 ring_str(err->ring), 718 cache_level_str(err->cache_level)); 719 720 if (err->name) 721 seq_printf(m, " (name: %d)", err->name); 722 if (err->fence_reg != I915_FENCE_REG_NONE) 723 seq_printf(m, " (fence: %d)", err->fence_reg); 724 725 seq_printf(m, "\n"); 726 err++; 727 } 728 } 729 730 static int i915_error_state(struct seq_file *m, void *unused) 731 { 732 struct drm_info_node *node = (struct drm_info_node *) m->private; 733 struct drm_device *dev = node->minor->dev; 734 drm_i915_private_t *dev_priv = dev->dev_private; 735 struct drm_i915_error_state *error; 736 unsigned long flags; 737 int i, page, offset, elt; 738 739 spin_lock_irqsave(&dev_priv->error_lock, flags); 740 if (!dev_priv->first_error) { 741 seq_printf(m, "no error state collected\n"); 742 goto out; 743 } 744 745 error = dev_priv->first_error; 746 747 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 748 error->time.tv_usec); 749 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 750 seq_printf(m, "EIR: 0x%08x\n", error->eir); 751 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 752 if (INTEL_INFO(dev)->gen >= 6) { 753 seq_printf(m, "ERROR: 0x%08x\n", error->error); 754 seq_printf(m, "Blitter command stream:\n"); 755 seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); 756 seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); 757 seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); 758 seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); 759 seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); 760 seq_printf(m, "Video (BSD) command stream:\n"); 761 seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); 762 seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); 763 seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); 764 seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); 765 seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); 766 } 767 seq_printf(m, "Render command stream:\n"); 768 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); 769 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); 770 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); 771 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); 772 if (INTEL_INFO(dev)->gen >= 4) { 773 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); 774 seq_printf(m, " INSTPS: 0x%08x\n", error->instps); 775 } 776 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 777 seq_printf(m, " seqno: 0x%08x\n", error->seqno); 778 779 for (i = 0; i < dev_priv->num_fence_regs; i++) 780 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 781 782 if (error->active_bo) 783 print_error_buffers(m, "Active", 784 error->active_bo, 785 error->active_bo_count); 786 787 if (error->pinned_bo) 788 print_error_buffers(m, "Pinned", 789 error->pinned_bo, 790 error->pinned_bo_count); 791 792 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { 793 if (error->batchbuffer[i]) { 794 struct drm_i915_error_object *obj = error->batchbuffer[i]; 795 796 seq_printf(m, "%s --- gtt_offset = 0x%08x\n", 797 dev_priv->ring[i].name, 798 obj->gtt_offset); 799 offset = 0; 800 for (page = 0; page < obj->page_count; page++) { 801 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 802 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); 803 offset += 4; 804 } 805 } 806 } 807 } 808 809 for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) { 810 if (error->ringbuffer[i]) { 811 struct drm_i915_error_object *obj = error->ringbuffer[i]; 812 seq_printf(m, "%s --- ringbuffer = 0x%08x\n", 813 dev_priv->ring[i].name, 814 obj->gtt_offset); 815 offset = 0; 816 for (page = 0; page < obj->page_count; page++) { 817 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 818 seq_printf(m, "%08x : %08x\n", 819 offset, 820 obj->pages[page][elt]); 821 offset += 4; 822 } 823 } 824 } 825 } 826 827 if (error->overlay) 828 intel_overlay_print_error_state(m, error->overlay); 829 830 if (error->display) 831 intel_display_print_error_state(m, dev, error->display); 832 833 out: 834 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 835 836 return 0; 837 } 838 839 static int i915_rstdby_delays(struct seq_file *m, void *unused) 840 { 841 struct drm_info_node *node = (struct drm_info_node *) m->private; 842 struct drm_device *dev = node->minor->dev; 843 drm_i915_private_t *dev_priv = dev->dev_private; 844 u16 crstanddelay = I915_READ16(CRSTANDVID); 845 846 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 847 848 return 0; 849 } 850 851 static int i915_cur_delayinfo(struct seq_file *m, void *unused) 852 { 853 struct drm_info_node *node = (struct drm_info_node *) m->private; 854 struct drm_device *dev = node->minor->dev; 855 drm_i915_private_t *dev_priv = dev->dev_private; 856 int ret; 857 858 if (IS_GEN5(dev)) { 859 u16 rgvswctl = I915_READ16(MEMSWCTL); 860 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 861 862 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 863 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 864 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 865 MEMSTAT_VID_SHIFT); 866 seq_printf(m, "Current P-state: %d\n", 867 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 868 } else if (IS_GEN6(dev)) { 869 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 870 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 871 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 872 u32 rpstat; 873 u32 rpupei, rpcurup, rpprevup; 874 u32 rpdownei, rpcurdown, rpprevdown; 875 int max_freq; 876 877 /* RPSTAT1 is in the GT power well */ 878 ret = mutex_lock_interruptible(&dev->struct_mutex); 879 if (ret) 880 return ret; 881 882 gen6_gt_force_wake_get(dev_priv); 883 884 rpstat = I915_READ(GEN6_RPSTAT1); 885 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 886 rpcurup = I915_READ(GEN6_RP_CUR_UP); 887 rpprevup = I915_READ(GEN6_RP_PREV_UP); 888 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 889 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 890 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 891 892 gen6_gt_force_wake_put(dev_priv); 893 mutex_unlock(&dev->struct_mutex); 894 895 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 896 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 897 seq_printf(m, "Render p-state ratio: %d\n", 898 (gt_perf_status & 0xff00) >> 8); 899 seq_printf(m, "Render p-state VID: %d\n", 900 gt_perf_status & 0xff); 901 seq_printf(m, "Render p-state limit: %d\n", 902 rp_state_limits & 0xff); 903 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> 904 GEN6_CAGF_SHIFT) * 50); 905 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 906 GEN6_CURICONT_MASK); 907 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 908 GEN6_CURBSYTAVG_MASK); 909 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 910 GEN6_CURBSYTAVG_MASK); 911 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 912 GEN6_CURIAVG_MASK); 913 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 914 GEN6_CURBSYTAVG_MASK); 915 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 916 GEN6_CURBSYTAVG_MASK); 917 918 max_freq = (rp_state_cap & 0xff0000) >> 16; 919 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 920 max_freq * 50); 921 922 max_freq = (rp_state_cap & 0xff00) >> 8; 923 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 924 max_freq * 50); 925 926 max_freq = rp_state_cap & 0xff; 927 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 928 max_freq * 50); 929 } else { 930 seq_printf(m, "no P-state info available\n"); 931 } 932 933 return 0; 934 } 935 936 static int i915_delayfreq_table(struct seq_file *m, void *unused) 937 { 938 struct drm_info_node *node = (struct drm_info_node *) m->private; 939 struct drm_device *dev = node->minor->dev; 940 drm_i915_private_t *dev_priv = dev->dev_private; 941 u32 delayfreq; 942 int i; 943 944 for (i = 0; i < 16; i++) { 945 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 946 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 947 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 948 } 949 950 return 0; 951 } 952 953 static inline int MAP_TO_MV(int map) 954 { 955 return 1250 - (map * 25); 956 } 957 958 static int i915_inttoext_table(struct seq_file *m, void *unused) 959 { 960 struct drm_info_node *node = (struct drm_info_node *) m->private; 961 struct drm_device *dev = node->minor->dev; 962 drm_i915_private_t *dev_priv = dev->dev_private; 963 u32 inttoext; 964 int i; 965 966 for (i = 1; i <= 32; i++) { 967 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 968 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 969 } 970 971 return 0; 972 } 973 974 static int i915_drpc_info(struct seq_file *m, void *unused) 975 { 976 struct drm_info_node *node = (struct drm_info_node *) m->private; 977 struct drm_device *dev = node->minor->dev; 978 drm_i915_private_t *dev_priv = dev->dev_private; 979 u32 rgvmodectl = I915_READ(MEMMODECTL); 980 u32 rstdbyctl = I915_READ(RSTDBYCTL); 981 u16 crstandvid = I915_READ16(CRSTANDVID); 982 983 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 984 "yes" : "no"); 985 seq_printf(m, "Boost freq: %d\n", 986 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 987 MEMMODE_BOOST_FREQ_SHIFT); 988 seq_printf(m, "HW control enabled: %s\n", 989 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 990 seq_printf(m, "SW control enabled: %s\n", 991 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 992 seq_printf(m, "Gated voltage change: %s\n", 993 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 994 seq_printf(m, "Starting frequency: P%d\n", 995 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 996 seq_printf(m, "Max P-state: P%d\n", 997 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 998 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 999 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1000 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1001 seq_printf(m, "Render standby enabled: %s\n", 1002 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1003 seq_printf(m, "Current RS state: "); 1004 switch (rstdbyctl & RSX_STATUS_MASK) { 1005 case RSX_STATUS_ON: 1006 seq_printf(m, "on\n"); 1007 break; 1008 case RSX_STATUS_RC1: 1009 seq_printf(m, "RC1\n"); 1010 break; 1011 case RSX_STATUS_RC1E: 1012 seq_printf(m, "RC1E\n"); 1013 break; 1014 case RSX_STATUS_RS1: 1015 seq_printf(m, "RS1\n"); 1016 break; 1017 case RSX_STATUS_RS2: 1018 seq_printf(m, "RS2 (RC6)\n"); 1019 break; 1020 case RSX_STATUS_RS3: 1021 seq_printf(m, "RC3 (RC6+)\n"); 1022 break; 1023 default: 1024 seq_printf(m, "unknown\n"); 1025 break; 1026 } 1027 1028 return 0; 1029 } 1030 1031 static int i915_fbc_status(struct seq_file *m, void *unused) 1032 { 1033 struct drm_info_node *node = (struct drm_info_node *) m->private; 1034 struct drm_device *dev = node->minor->dev; 1035 drm_i915_private_t *dev_priv = dev->dev_private; 1036 1037 if (!I915_HAS_FBC(dev)) { 1038 seq_printf(m, "FBC unsupported on this chipset\n"); 1039 return 0; 1040 } 1041 1042 if (intel_fbc_enabled(dev)) { 1043 seq_printf(m, "FBC enabled\n"); 1044 } else { 1045 seq_printf(m, "FBC disabled: "); 1046 switch (dev_priv->no_fbc_reason) { 1047 case FBC_NO_OUTPUT: 1048 seq_printf(m, "no outputs"); 1049 break; 1050 case FBC_STOLEN_TOO_SMALL: 1051 seq_printf(m, "not enough stolen memory"); 1052 break; 1053 case FBC_UNSUPPORTED_MODE: 1054 seq_printf(m, "mode not supported"); 1055 break; 1056 case FBC_MODE_TOO_LARGE: 1057 seq_printf(m, "mode too large"); 1058 break; 1059 case FBC_BAD_PLANE: 1060 seq_printf(m, "FBC unsupported on plane"); 1061 break; 1062 case FBC_NOT_TILED: 1063 seq_printf(m, "scanout buffer not tiled"); 1064 break; 1065 case FBC_MULTIPLE_PIPES: 1066 seq_printf(m, "multiple pipes are enabled"); 1067 break; 1068 case FBC_MODULE_PARAM: 1069 seq_printf(m, "disabled per module param (default off)"); 1070 break; 1071 default: 1072 seq_printf(m, "unknown reason"); 1073 } 1074 seq_printf(m, "\n"); 1075 } 1076 return 0; 1077 } 1078 1079 static int i915_sr_status(struct seq_file *m, void *unused) 1080 { 1081 struct drm_info_node *node = (struct drm_info_node *) m->private; 1082 struct drm_device *dev = node->minor->dev; 1083 drm_i915_private_t *dev_priv = dev->dev_private; 1084 bool sr_enabled = false; 1085 1086 if (HAS_PCH_SPLIT(dev)) 1087 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1088 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1089 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1090 else if (IS_I915GM(dev)) 1091 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1092 else if (IS_PINEVIEW(dev)) 1093 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1094 1095 seq_printf(m, "self-refresh: %s\n", 1096 sr_enabled ? "enabled" : "disabled"); 1097 1098 return 0; 1099 } 1100 1101 static int i915_emon_status(struct seq_file *m, void *unused) 1102 { 1103 struct drm_info_node *node = (struct drm_info_node *) m->private; 1104 struct drm_device *dev = node->minor->dev; 1105 drm_i915_private_t *dev_priv = dev->dev_private; 1106 unsigned long temp, chipset, gfx; 1107 int ret; 1108 1109 ret = mutex_lock_interruptible(&dev->struct_mutex); 1110 if (ret) 1111 return ret; 1112 1113 temp = i915_mch_val(dev_priv); 1114 chipset = i915_chipset_val(dev_priv); 1115 gfx = i915_gfx_val(dev_priv); 1116 mutex_unlock(&dev->struct_mutex); 1117 1118 seq_printf(m, "GMCH temp: %ld\n", temp); 1119 seq_printf(m, "Chipset power: %ld\n", chipset); 1120 seq_printf(m, "GFX power: %ld\n", gfx); 1121 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1122 1123 return 0; 1124 } 1125 1126 static int i915_gfxec(struct seq_file *m, void *unused) 1127 { 1128 struct drm_info_node *node = (struct drm_info_node *) m->private; 1129 struct drm_device *dev = node->minor->dev; 1130 drm_i915_private_t *dev_priv = dev->dev_private; 1131 1132 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1133 1134 return 0; 1135 } 1136 1137 static int i915_opregion(struct seq_file *m, void *unused) 1138 { 1139 struct drm_info_node *node = (struct drm_info_node *) m->private; 1140 struct drm_device *dev = node->minor->dev; 1141 drm_i915_private_t *dev_priv = dev->dev_private; 1142 struct intel_opregion *opregion = &dev_priv->opregion; 1143 int ret; 1144 1145 ret = mutex_lock_interruptible(&dev->struct_mutex); 1146 if (ret) 1147 return ret; 1148 1149 if (opregion->header) 1150 seq_write(m, opregion->header, OPREGION_SIZE); 1151 1152 mutex_unlock(&dev->struct_mutex); 1153 1154 return 0; 1155 } 1156 1157 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1158 { 1159 struct drm_info_node *node = (struct drm_info_node *) m->private; 1160 struct drm_device *dev = node->minor->dev; 1161 drm_i915_private_t *dev_priv = dev->dev_private; 1162 struct intel_fbdev *ifbdev; 1163 struct intel_framebuffer *fb; 1164 int ret; 1165 1166 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1167 if (ret) 1168 return ret; 1169 1170 ifbdev = dev_priv->fbdev; 1171 fb = to_intel_framebuffer(ifbdev->helper.fb); 1172 1173 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", 1174 fb->base.width, 1175 fb->base.height, 1176 fb->base.depth, 1177 fb->base.bits_per_pixel); 1178 describe_obj(m, fb->obj); 1179 seq_printf(m, "\n"); 1180 1181 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1182 if (&fb->base == ifbdev->helper.fb) 1183 continue; 1184 1185 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", 1186 fb->base.width, 1187 fb->base.height, 1188 fb->base.depth, 1189 fb->base.bits_per_pixel); 1190 describe_obj(m, fb->obj); 1191 seq_printf(m, "\n"); 1192 } 1193 1194 mutex_unlock(&dev->mode_config.mutex); 1195 1196 return 0; 1197 } 1198 1199 static int i915_context_status(struct seq_file *m, void *unused) 1200 { 1201 struct drm_info_node *node = (struct drm_info_node *) m->private; 1202 struct drm_device *dev = node->minor->dev; 1203 drm_i915_private_t *dev_priv = dev->dev_private; 1204 int ret; 1205 1206 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1207 if (ret) 1208 return ret; 1209 1210 if (dev_priv->pwrctx) { 1211 seq_printf(m, "power context "); 1212 describe_obj(m, dev_priv->pwrctx); 1213 seq_printf(m, "\n"); 1214 } 1215 1216 if (dev_priv->renderctx) { 1217 seq_printf(m, "render context "); 1218 describe_obj(m, dev_priv->renderctx); 1219 seq_printf(m, "\n"); 1220 } 1221 1222 mutex_unlock(&dev->mode_config.mutex); 1223 1224 return 0; 1225 } 1226 1227 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) 1228 { 1229 struct drm_info_node *node = (struct drm_info_node *) m->private; 1230 struct drm_device *dev = node->minor->dev; 1231 struct drm_i915_private *dev_priv = dev->dev_private; 1232 1233 seq_printf(m, "forcewake count = %d\n", 1234 atomic_read(&dev_priv->forcewake_count)); 1235 1236 return 0; 1237 } 1238 1239 static int 1240 i915_wedged_open(struct inode *inode, 1241 struct file *filp) 1242 { 1243 filp->private_data = inode->i_private; 1244 return 0; 1245 } 1246 1247 static ssize_t 1248 i915_wedged_read(struct file *filp, 1249 char __user *ubuf, 1250 size_t max, 1251 loff_t *ppos) 1252 { 1253 struct drm_device *dev = filp->private_data; 1254 drm_i915_private_t *dev_priv = dev->dev_private; 1255 char buf[80]; 1256 int len; 1257 1258 len = snprintf(buf, sizeof (buf), 1259 "wedged : %d\n", 1260 atomic_read(&dev_priv->mm.wedged)); 1261 1262 if (len > sizeof (buf)) 1263 len = sizeof (buf); 1264 1265 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1266 } 1267 1268 static ssize_t 1269 i915_wedged_write(struct file *filp, 1270 const char __user *ubuf, 1271 size_t cnt, 1272 loff_t *ppos) 1273 { 1274 struct drm_device *dev = filp->private_data; 1275 char buf[20]; 1276 int val = 1; 1277 1278 if (cnt > 0) { 1279 if (cnt > sizeof (buf) - 1) 1280 return -EINVAL; 1281 1282 if (copy_from_user(buf, ubuf, cnt)) 1283 return -EFAULT; 1284 buf[cnt] = 0; 1285 1286 val = simple_strtoul(buf, NULL, 0); 1287 } 1288 1289 DRM_INFO("Manually setting wedged to %d\n", val); 1290 i915_handle_error(dev, val); 1291 1292 return cnt; 1293 } 1294 1295 static const struct file_operations i915_wedged_fops = { 1296 .owner = THIS_MODULE, 1297 .open = i915_wedged_open, 1298 .read = i915_wedged_read, 1299 .write = i915_wedged_write, 1300 .llseek = default_llseek, 1301 }; 1302 1303 /* As the drm_debugfs_init() routines are called before dev->dev_private is 1304 * allocated we need to hook into the minor for release. */ 1305 static int 1306 drm_add_fake_info_node(struct drm_minor *minor, 1307 struct dentry *ent, 1308 const void *key) 1309 { 1310 struct drm_info_node *node; 1311 1312 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 1313 if (node == NULL) { 1314 debugfs_remove(ent); 1315 return -ENOMEM; 1316 } 1317 1318 node->minor = minor; 1319 node->dent = ent; 1320 node->info_ent = (void *) key; 1321 list_add(&node->list, &minor->debugfs_nodes.list); 1322 1323 return 0; 1324 } 1325 1326 static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) 1327 { 1328 struct drm_device *dev = minor->dev; 1329 struct dentry *ent; 1330 1331 ent = debugfs_create_file("i915_wedged", 1332 S_IRUGO | S_IWUSR, 1333 root, dev, 1334 &i915_wedged_fops); 1335 if (IS_ERR(ent)) 1336 return PTR_ERR(ent); 1337 1338 return drm_add_fake_info_node(minor, ent, &i915_wedged_fops); 1339 } 1340 1341 static int i915_forcewake_open(struct inode *inode, struct file *file) 1342 { 1343 struct drm_device *dev = inode->i_private; 1344 struct drm_i915_private *dev_priv = dev->dev_private; 1345 int ret; 1346 1347 if (!IS_GEN6(dev)) 1348 return 0; 1349 1350 ret = mutex_lock_interruptible(&dev->struct_mutex); 1351 if (ret) 1352 return ret; 1353 gen6_gt_force_wake_get(dev_priv); 1354 mutex_unlock(&dev->struct_mutex); 1355 1356 return 0; 1357 } 1358 1359 int i915_forcewake_release(struct inode *inode, struct file *file) 1360 { 1361 struct drm_device *dev = inode->i_private; 1362 struct drm_i915_private *dev_priv = dev->dev_private; 1363 1364 if (!IS_GEN6(dev)) 1365 return 0; 1366 1367 /* 1368 * It's bad that we can potentially hang userspace if struct_mutex gets 1369 * forever stuck. However, if we cannot acquire this lock it means that 1370 * almost certainly the driver has hung, is not unload-able. Therefore 1371 * hanging here is probably a minor inconvenience not to be seen my 1372 * almost every user. 1373 */ 1374 mutex_lock(&dev->struct_mutex); 1375 gen6_gt_force_wake_put(dev_priv); 1376 mutex_unlock(&dev->struct_mutex); 1377 1378 return 0; 1379 } 1380 1381 static const struct file_operations i915_forcewake_fops = { 1382 .owner = THIS_MODULE, 1383 .open = i915_forcewake_open, 1384 .release = i915_forcewake_release, 1385 }; 1386 1387 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 1388 { 1389 struct drm_device *dev = minor->dev; 1390 struct dentry *ent; 1391 1392 ent = debugfs_create_file("i915_forcewake_user", 1393 S_IRUSR, 1394 root, dev, 1395 &i915_forcewake_fops); 1396 if (IS_ERR(ent)) 1397 return PTR_ERR(ent); 1398 1399 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 1400 } 1401 1402 static struct drm_info_list i915_debugfs_list[] = { 1403 {"i915_capabilities", i915_capabilities, 0}, 1404 {"i915_gem_objects", i915_gem_object_info, 0}, 1405 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 1406 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 1407 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 1408 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 1409 {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST}, 1410 {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST}, 1411 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 1412 {"i915_gem_request", i915_gem_request_info, 0}, 1413 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 1414 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 1415 {"i915_gem_interrupt", i915_interrupt_info, 0}, 1416 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 1417 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 1418 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 1419 {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS}, 1420 {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS}, 1421 {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS}, 1422 {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS}, 1423 {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS}, 1424 {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS}, 1425 {"i915_batchbuffers", i915_batchbuffer_info, 0}, 1426 {"i915_error_state", i915_error_state, 0}, 1427 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 1428 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 1429 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 1430 {"i915_inttoext_table", i915_inttoext_table, 0}, 1431 {"i915_drpc_info", i915_drpc_info, 0}, 1432 {"i915_emon_status", i915_emon_status, 0}, 1433 {"i915_gfxec", i915_gfxec, 0}, 1434 {"i915_fbc_status", i915_fbc_status, 0}, 1435 {"i915_sr_status", i915_sr_status, 0}, 1436 {"i915_opregion", i915_opregion, 0}, 1437 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 1438 {"i915_context_status", i915_context_status, 0}, 1439 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 1440 }; 1441 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 1442 1443 int i915_debugfs_init(struct drm_minor *minor) 1444 { 1445 int ret; 1446 1447 ret = i915_wedged_create(minor->debugfs_root, minor); 1448 if (ret) 1449 return ret; 1450 1451 ret = i915_forcewake_create(minor->debugfs_root, minor); 1452 if (ret) 1453 return ret; 1454 1455 return drm_debugfs_create_files(i915_debugfs_list, 1456 I915_DEBUGFS_ENTRIES, 1457 minor->debugfs_root, minor); 1458 } 1459 1460 void i915_debugfs_cleanup(struct drm_minor *minor) 1461 { 1462 drm_debugfs_remove_files(i915_debugfs_list, 1463 I915_DEBUGFS_ENTRIES, minor); 1464 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 1465 1, minor); 1466 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, 1467 1, minor); 1468 } 1469 1470 #endif /* CONFIG_DEBUG_FS */ 1471