1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/circ_buf.h> 31 #include <linux/ctype.h> 32 #include <linux/debugfs.h> 33 #include <linux/slab.h> 34 #include <linux/export.h> 35 #include <linux/list_sort.h> 36 #include <asm/msr-index.h> 37 #include <drm/drmP.h> 38 #include "intel_drv.h" 39 #include "intel_ringbuffer.h" 40 #include <drm/i915_drm.h> 41 #include "i915_drv.h" 42 43 enum { 44 ACTIVE_LIST, 45 INACTIVE_LIST, 46 PINNED_LIST, 47 }; 48 49 static const char *yesno(int v) 50 { 51 return v ? "yes" : "no"; 52 } 53 54 /* As the drm_debugfs_init() routines are called before dev->dev_private is 55 * allocated we need to hook into the minor for release. */ 56 static int 57 drm_add_fake_info_node(struct drm_minor *minor, 58 struct dentry *ent, 59 const void *key) 60 { 61 struct drm_info_node *node; 62 63 node = kmalloc(sizeof(*node), GFP_KERNEL); 64 if (node == NULL) { 65 debugfs_remove(ent); 66 return -ENOMEM; 67 } 68 69 node->minor = minor; 70 node->dent = ent; 71 node->info_ent = (void *) key; 72 73 mutex_lock(&minor->debugfs_lock); 74 list_add(&node->list, &minor->debugfs_list); 75 mutex_unlock(&minor->debugfs_lock); 76 77 return 0; 78 } 79 80 static int i915_capabilities(struct seq_file *m, void *data) 81 { 82 struct drm_info_node *node = m->private; 83 struct drm_device *dev = node->minor->dev; 84 const struct intel_device_info *info = INTEL_INFO(dev); 85 86 seq_printf(m, "gen: %d\n", info->gen); 87 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 88 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 89 #define SEP_SEMICOLON ; 90 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON); 91 #undef PRINT_FLAG 92 #undef SEP_SEMICOLON 93 94 return 0; 95 } 96 97 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 98 { 99 if (obj->user_pin_count > 0) 100 return "P"; 101 else if (i915_gem_obj_is_pinned(obj)) 102 return "p"; 103 else 104 return " "; 105 } 106 107 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 108 { 109 switch (obj->tiling_mode) { 110 default: 111 case I915_TILING_NONE: return " "; 112 case I915_TILING_X: return "X"; 113 case I915_TILING_Y: return "Y"; 114 } 115 } 116 117 static inline const char *get_global_flag(struct drm_i915_gem_object *obj) 118 { 119 return obj->has_global_gtt_mapping ? "g" : " "; 120 } 121 122 static void 123 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 124 { 125 struct i915_vma *vma; 126 int pin_count = 0; 127 128 seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s", 129 &obj->base, 130 get_pin_flag(obj), 131 get_tiling_flag(obj), 132 get_global_flag(obj), 133 obj->base.size / 1024, 134 obj->base.read_domains, 135 obj->base.write_domain, 136 obj->last_read_seqno, 137 obj->last_write_seqno, 138 obj->last_fenced_seqno, 139 i915_cache_level_str(obj->cache_level), 140 obj->dirty ? " dirty" : "", 141 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 142 if (obj->base.name) 143 seq_printf(m, " (name: %d)", obj->base.name); 144 list_for_each_entry(vma, &obj->vma_list, vma_link) 145 if (vma->pin_count > 0) 146 pin_count++; 147 seq_printf(m, " (pinned x %d)", pin_count); 148 if (obj->pin_display) 149 seq_printf(m, " (display)"); 150 if (obj->fence_reg != I915_FENCE_REG_NONE) 151 seq_printf(m, " (fence: %d)", obj->fence_reg); 152 list_for_each_entry(vma, &obj->vma_list, vma_link) { 153 if (!i915_is_ggtt(vma->vm)) 154 seq_puts(m, " (pp"); 155 else 156 seq_puts(m, " (g"); 157 seq_printf(m, "gtt offset: %08lx, size: %08lx)", 158 vma->node.start, vma->node.size); 159 } 160 if (obj->stolen) 161 seq_printf(m, " (stolen: %08lx)", obj->stolen->start); 162 if (obj->pin_mappable || obj->fault_mappable) { 163 char s[3], *t = s; 164 if (obj->pin_mappable) 165 *t++ = 'p'; 166 if (obj->fault_mappable) 167 *t++ = 'f'; 168 *t = '\0'; 169 seq_printf(m, " (%s mappable)", s); 170 } 171 if (obj->ring != NULL) 172 seq_printf(m, " (%s)", obj->ring->name); 173 } 174 175 static void describe_ctx(struct seq_file *m, struct intel_context *ctx) 176 { 177 seq_putc(m, ctx->is_initialized ? 'I' : 'i'); 178 seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 179 seq_putc(m, ' '); 180 } 181 182 static int i915_gem_object_list_info(struct seq_file *m, void *data) 183 { 184 struct drm_info_node *node = m->private; 185 uintptr_t list = (uintptr_t) node->info_ent->data; 186 struct list_head *head; 187 struct drm_device *dev = node->minor->dev; 188 struct drm_i915_private *dev_priv = dev->dev_private; 189 struct i915_address_space *vm = &dev_priv->gtt.base; 190 struct i915_vma *vma; 191 size_t total_obj_size, total_gtt_size; 192 int count, ret; 193 194 ret = mutex_lock_interruptible(&dev->struct_mutex); 195 if (ret) 196 return ret; 197 198 /* FIXME: the user of this interface might want more than just GGTT */ 199 switch (list) { 200 case ACTIVE_LIST: 201 seq_puts(m, "Active:\n"); 202 head = &vm->active_list; 203 break; 204 case INACTIVE_LIST: 205 seq_puts(m, "Inactive:\n"); 206 head = &vm->inactive_list; 207 break; 208 default: 209 mutex_unlock(&dev->struct_mutex); 210 return -EINVAL; 211 } 212 213 total_obj_size = total_gtt_size = count = 0; 214 list_for_each_entry(vma, head, mm_list) { 215 seq_printf(m, " "); 216 describe_obj(m, vma->obj); 217 seq_printf(m, "\n"); 218 total_obj_size += vma->obj->base.size; 219 total_gtt_size += vma->node.size; 220 count++; 221 } 222 mutex_unlock(&dev->struct_mutex); 223 224 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 225 count, total_obj_size, total_gtt_size); 226 return 0; 227 } 228 229 static int obj_rank_by_stolen(void *priv, 230 struct list_head *A, struct list_head *B) 231 { 232 struct drm_i915_gem_object *a = 233 container_of(A, struct drm_i915_gem_object, obj_exec_link); 234 struct drm_i915_gem_object *b = 235 container_of(B, struct drm_i915_gem_object, obj_exec_link); 236 237 return a->stolen->start - b->stolen->start; 238 } 239 240 static int i915_gem_stolen_list_info(struct seq_file *m, void *data) 241 { 242 struct drm_info_node *node = m->private; 243 struct drm_device *dev = node->minor->dev; 244 struct drm_i915_private *dev_priv = dev->dev_private; 245 struct drm_i915_gem_object *obj; 246 size_t total_obj_size, total_gtt_size; 247 LIST_HEAD(stolen); 248 int count, ret; 249 250 ret = mutex_lock_interruptible(&dev->struct_mutex); 251 if (ret) 252 return ret; 253 254 total_obj_size = total_gtt_size = count = 0; 255 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 256 if (obj->stolen == NULL) 257 continue; 258 259 list_add(&obj->obj_exec_link, &stolen); 260 261 total_obj_size += obj->base.size; 262 total_gtt_size += i915_gem_obj_ggtt_size(obj); 263 count++; 264 } 265 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 266 if (obj->stolen == NULL) 267 continue; 268 269 list_add(&obj->obj_exec_link, &stolen); 270 271 total_obj_size += obj->base.size; 272 count++; 273 } 274 list_sort(NULL, &stolen, obj_rank_by_stolen); 275 seq_puts(m, "Stolen:\n"); 276 while (!list_empty(&stolen)) { 277 obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link); 278 seq_puts(m, " "); 279 describe_obj(m, obj); 280 seq_putc(m, '\n'); 281 list_del_init(&obj->obj_exec_link); 282 } 283 mutex_unlock(&dev->struct_mutex); 284 285 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 286 count, total_obj_size, total_gtt_size); 287 return 0; 288 } 289 290 #define count_objects(list, member) do { \ 291 list_for_each_entry(obj, list, member) { \ 292 size += i915_gem_obj_ggtt_size(obj); \ 293 ++count; \ 294 if (obj->map_and_fenceable) { \ 295 mappable_size += i915_gem_obj_ggtt_size(obj); \ 296 ++mappable_count; \ 297 } \ 298 } \ 299 } while (0) 300 301 struct file_stats { 302 struct drm_i915_file_private *file_priv; 303 int count; 304 size_t total, unbound; 305 size_t global, shared; 306 size_t active, inactive; 307 }; 308 309 static int per_file_stats(int id, void *ptr, void *data) 310 { 311 struct drm_i915_gem_object *obj = ptr; 312 struct file_stats *stats = data; 313 struct i915_vma *vma; 314 315 stats->count++; 316 stats->total += obj->base.size; 317 318 if (obj->base.name || obj->base.dma_buf) 319 stats->shared += obj->base.size; 320 321 if (USES_FULL_PPGTT(obj->base.dev)) { 322 list_for_each_entry(vma, &obj->vma_list, vma_link) { 323 struct i915_hw_ppgtt *ppgtt; 324 325 if (!drm_mm_node_allocated(&vma->node)) 326 continue; 327 328 if (i915_is_ggtt(vma->vm)) { 329 stats->global += obj->base.size; 330 continue; 331 } 332 333 ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base); 334 if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv) 335 continue; 336 337 if (obj->ring) /* XXX per-vma statistic */ 338 stats->active += obj->base.size; 339 else 340 stats->inactive += obj->base.size; 341 342 return 0; 343 } 344 } else { 345 if (i915_gem_obj_ggtt_bound(obj)) { 346 stats->global += obj->base.size; 347 if (obj->ring) 348 stats->active += obj->base.size; 349 else 350 stats->inactive += obj->base.size; 351 return 0; 352 } 353 } 354 355 if (!list_empty(&obj->global_list)) 356 stats->unbound += obj->base.size; 357 358 return 0; 359 } 360 361 #define count_vmas(list, member) do { \ 362 list_for_each_entry(vma, list, member) { \ 363 size += i915_gem_obj_ggtt_size(vma->obj); \ 364 ++count; \ 365 if (vma->obj->map_and_fenceable) { \ 366 mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ 367 ++mappable_count; \ 368 } \ 369 } \ 370 } while (0) 371 372 static int i915_gem_object_info(struct seq_file *m, void* data) 373 { 374 struct drm_info_node *node = m->private; 375 struct drm_device *dev = node->minor->dev; 376 struct drm_i915_private *dev_priv = dev->dev_private; 377 u32 count, mappable_count, purgeable_count; 378 size_t size, mappable_size, purgeable_size; 379 struct drm_i915_gem_object *obj; 380 struct i915_address_space *vm = &dev_priv->gtt.base; 381 struct drm_file *file; 382 struct i915_vma *vma; 383 int ret; 384 385 ret = mutex_lock_interruptible(&dev->struct_mutex); 386 if (ret) 387 return ret; 388 389 seq_printf(m, "%u objects, %zu bytes\n", 390 dev_priv->mm.object_count, 391 dev_priv->mm.object_memory); 392 393 size = count = mappable_size = mappable_count = 0; 394 count_objects(&dev_priv->mm.bound_list, global_list); 395 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 396 count, mappable_count, size, mappable_size); 397 398 size = count = mappable_size = mappable_count = 0; 399 count_vmas(&vm->active_list, mm_list); 400 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 401 count, mappable_count, size, mappable_size); 402 403 size = count = mappable_size = mappable_count = 0; 404 count_vmas(&vm->inactive_list, mm_list); 405 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 406 count, mappable_count, size, mappable_size); 407 408 size = count = purgeable_size = purgeable_count = 0; 409 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 410 size += obj->base.size, ++count; 411 if (obj->madv == I915_MADV_DONTNEED) 412 purgeable_size += obj->base.size, ++purgeable_count; 413 } 414 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); 415 416 size = count = mappable_size = mappable_count = 0; 417 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 418 if (obj->fault_mappable) { 419 size += i915_gem_obj_ggtt_size(obj); 420 ++count; 421 } 422 if (obj->pin_mappable) { 423 mappable_size += i915_gem_obj_ggtt_size(obj); 424 ++mappable_count; 425 } 426 if (obj->madv == I915_MADV_DONTNEED) { 427 purgeable_size += obj->base.size; 428 ++purgeable_count; 429 } 430 } 431 seq_printf(m, "%u purgeable objects, %zu bytes\n", 432 purgeable_count, purgeable_size); 433 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 434 mappable_count, mappable_size); 435 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 436 count, size); 437 438 seq_printf(m, "%zu [%lu] gtt total\n", 439 dev_priv->gtt.base.total, 440 dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); 441 442 seq_putc(m, '\n'); 443 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 444 struct file_stats stats; 445 struct task_struct *task; 446 447 memset(&stats, 0, sizeof(stats)); 448 stats.file_priv = file->driver_priv; 449 idr_for_each(&file->object_idr, per_file_stats, &stats); 450 /* 451 * Although we have a valid reference on file->pid, that does 452 * not guarantee that the task_struct who called get_pid() is 453 * still alive (e.g. get_pid(current) => fork() => exit()). 454 * Therefore, we need to protect this ->comm access using RCU. 455 */ 456 rcu_read_lock(); 457 task = pid_task(file->pid, PIDTYPE_PID); 458 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", 459 task ? task->comm : "<unknown>", 460 stats.count, 461 stats.total, 462 stats.active, 463 stats.inactive, 464 stats.global, 465 stats.shared, 466 stats.unbound); 467 rcu_read_unlock(); 468 } 469 470 mutex_unlock(&dev->struct_mutex); 471 472 return 0; 473 } 474 475 static int i915_gem_gtt_info(struct seq_file *m, void *data) 476 { 477 struct drm_info_node *node = m->private; 478 struct drm_device *dev = node->minor->dev; 479 uintptr_t list = (uintptr_t) node->info_ent->data; 480 struct drm_i915_private *dev_priv = dev->dev_private; 481 struct drm_i915_gem_object *obj; 482 size_t total_obj_size, total_gtt_size; 483 int count, ret; 484 485 ret = mutex_lock_interruptible(&dev->struct_mutex); 486 if (ret) 487 return ret; 488 489 total_obj_size = total_gtt_size = count = 0; 490 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 491 if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj)) 492 continue; 493 494 seq_puts(m, " "); 495 describe_obj(m, obj); 496 seq_putc(m, '\n'); 497 total_obj_size += obj->base.size; 498 total_gtt_size += i915_gem_obj_ggtt_size(obj); 499 count++; 500 } 501 502 mutex_unlock(&dev->struct_mutex); 503 504 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 505 count, total_obj_size, total_gtt_size); 506 507 return 0; 508 } 509 510 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 511 { 512 struct drm_info_node *node = m->private; 513 struct drm_device *dev = node->minor->dev; 514 unsigned long flags; 515 struct intel_crtc *crtc; 516 517 for_each_intel_crtc(dev, crtc) { 518 const char pipe = pipe_name(crtc->pipe); 519 const char plane = plane_name(crtc->plane); 520 struct intel_unpin_work *work; 521 522 spin_lock_irqsave(&dev->event_lock, flags); 523 work = crtc->unpin_work; 524 if (work == NULL) { 525 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 526 pipe, plane); 527 } else { 528 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 529 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 530 pipe, plane); 531 } else { 532 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 533 pipe, plane); 534 } 535 if (work->enable_stall_check) 536 seq_puts(m, "Stall check enabled, "); 537 else 538 seq_puts(m, "Stall check waiting for page flip ioctl, "); 539 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 540 541 if (work->old_fb_obj) { 542 struct drm_i915_gem_object *obj = work->old_fb_obj; 543 if (obj) 544 seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n", 545 i915_gem_obj_ggtt_offset(obj)); 546 } 547 if (work->pending_flip_obj) { 548 struct drm_i915_gem_object *obj = work->pending_flip_obj; 549 if (obj) 550 seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n", 551 i915_gem_obj_ggtt_offset(obj)); 552 } 553 } 554 spin_unlock_irqrestore(&dev->event_lock, flags); 555 } 556 557 return 0; 558 } 559 560 static int i915_gem_request_info(struct seq_file *m, void *data) 561 { 562 struct drm_info_node *node = m->private; 563 struct drm_device *dev = node->minor->dev; 564 struct drm_i915_private *dev_priv = dev->dev_private; 565 struct intel_engine_cs *ring; 566 struct drm_i915_gem_request *gem_request; 567 int ret, count, i; 568 569 ret = mutex_lock_interruptible(&dev->struct_mutex); 570 if (ret) 571 return ret; 572 573 count = 0; 574 for_each_ring(ring, dev_priv, i) { 575 if (list_empty(&ring->request_list)) 576 continue; 577 578 seq_printf(m, "%s requests:\n", ring->name); 579 list_for_each_entry(gem_request, 580 &ring->request_list, 581 list) { 582 seq_printf(m, " %d @ %d\n", 583 gem_request->seqno, 584 (int) (jiffies - gem_request->emitted_jiffies)); 585 } 586 count++; 587 } 588 mutex_unlock(&dev->struct_mutex); 589 590 if (count == 0) 591 seq_puts(m, "No requests\n"); 592 593 return 0; 594 } 595 596 static void i915_ring_seqno_info(struct seq_file *m, 597 struct intel_engine_cs *ring) 598 { 599 if (ring->get_seqno) { 600 seq_printf(m, "Current sequence (%s): %u\n", 601 ring->name, ring->get_seqno(ring, false)); 602 } 603 } 604 605 static int i915_gem_seqno_info(struct seq_file *m, void *data) 606 { 607 struct drm_info_node *node = m->private; 608 struct drm_device *dev = node->minor->dev; 609 struct drm_i915_private *dev_priv = dev->dev_private; 610 struct intel_engine_cs *ring; 611 int ret, i; 612 613 ret = mutex_lock_interruptible(&dev->struct_mutex); 614 if (ret) 615 return ret; 616 intel_runtime_pm_get(dev_priv); 617 618 for_each_ring(ring, dev_priv, i) 619 i915_ring_seqno_info(m, ring); 620 621 intel_runtime_pm_put(dev_priv); 622 mutex_unlock(&dev->struct_mutex); 623 624 return 0; 625 } 626 627 628 static int i915_interrupt_info(struct seq_file *m, void *data) 629 { 630 struct drm_info_node *node = m->private; 631 struct drm_device *dev = node->minor->dev; 632 struct drm_i915_private *dev_priv = dev->dev_private; 633 struct intel_engine_cs *ring; 634 int ret, i, pipe; 635 636 ret = mutex_lock_interruptible(&dev->struct_mutex); 637 if (ret) 638 return ret; 639 intel_runtime_pm_get(dev_priv); 640 641 if (IS_CHERRYVIEW(dev)) { 642 int i; 643 seq_printf(m, "Master Interrupt Control:\t%08x\n", 644 I915_READ(GEN8_MASTER_IRQ)); 645 646 seq_printf(m, "Display IER:\t%08x\n", 647 I915_READ(VLV_IER)); 648 seq_printf(m, "Display IIR:\t%08x\n", 649 I915_READ(VLV_IIR)); 650 seq_printf(m, "Display IIR_RW:\t%08x\n", 651 I915_READ(VLV_IIR_RW)); 652 seq_printf(m, "Display IMR:\t%08x\n", 653 I915_READ(VLV_IMR)); 654 for_each_pipe(pipe) 655 seq_printf(m, "Pipe %c stat:\t%08x\n", 656 pipe_name(pipe), 657 I915_READ(PIPESTAT(pipe))); 658 659 seq_printf(m, "Port hotplug:\t%08x\n", 660 I915_READ(PORT_HOTPLUG_EN)); 661 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 662 I915_READ(VLV_DPFLIPSTAT)); 663 seq_printf(m, "DPINVGTT:\t%08x\n", 664 I915_READ(DPINVGTT)); 665 666 for (i = 0; i < 4; i++) { 667 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 668 i, I915_READ(GEN8_GT_IMR(i))); 669 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 670 i, I915_READ(GEN8_GT_IIR(i))); 671 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 672 i, I915_READ(GEN8_GT_IER(i))); 673 } 674 675 seq_printf(m, "PCU interrupt mask:\t%08x\n", 676 I915_READ(GEN8_PCU_IMR)); 677 seq_printf(m, "PCU interrupt identity:\t%08x\n", 678 I915_READ(GEN8_PCU_IIR)); 679 seq_printf(m, "PCU interrupt enable:\t%08x\n", 680 I915_READ(GEN8_PCU_IER)); 681 } else if (INTEL_INFO(dev)->gen >= 8) { 682 seq_printf(m, "Master Interrupt Control:\t%08x\n", 683 I915_READ(GEN8_MASTER_IRQ)); 684 685 for (i = 0; i < 4; i++) { 686 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 687 i, I915_READ(GEN8_GT_IMR(i))); 688 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 689 i, I915_READ(GEN8_GT_IIR(i))); 690 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 691 i, I915_READ(GEN8_GT_IER(i))); 692 } 693 694 for_each_pipe(pipe) { 695 seq_printf(m, "Pipe %c IMR:\t%08x\n", 696 pipe_name(pipe), 697 I915_READ(GEN8_DE_PIPE_IMR(pipe))); 698 seq_printf(m, "Pipe %c IIR:\t%08x\n", 699 pipe_name(pipe), 700 I915_READ(GEN8_DE_PIPE_IIR(pipe))); 701 seq_printf(m, "Pipe %c IER:\t%08x\n", 702 pipe_name(pipe), 703 I915_READ(GEN8_DE_PIPE_IER(pipe))); 704 } 705 706 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 707 I915_READ(GEN8_DE_PORT_IMR)); 708 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 709 I915_READ(GEN8_DE_PORT_IIR)); 710 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 711 I915_READ(GEN8_DE_PORT_IER)); 712 713 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 714 I915_READ(GEN8_DE_MISC_IMR)); 715 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 716 I915_READ(GEN8_DE_MISC_IIR)); 717 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 718 I915_READ(GEN8_DE_MISC_IER)); 719 720 seq_printf(m, "PCU interrupt mask:\t%08x\n", 721 I915_READ(GEN8_PCU_IMR)); 722 seq_printf(m, "PCU interrupt identity:\t%08x\n", 723 I915_READ(GEN8_PCU_IIR)); 724 seq_printf(m, "PCU interrupt enable:\t%08x\n", 725 I915_READ(GEN8_PCU_IER)); 726 } else if (IS_VALLEYVIEW(dev)) { 727 seq_printf(m, "Display IER:\t%08x\n", 728 I915_READ(VLV_IER)); 729 seq_printf(m, "Display IIR:\t%08x\n", 730 I915_READ(VLV_IIR)); 731 seq_printf(m, "Display IIR_RW:\t%08x\n", 732 I915_READ(VLV_IIR_RW)); 733 seq_printf(m, "Display IMR:\t%08x\n", 734 I915_READ(VLV_IMR)); 735 for_each_pipe(pipe) 736 seq_printf(m, "Pipe %c stat:\t%08x\n", 737 pipe_name(pipe), 738 I915_READ(PIPESTAT(pipe))); 739 740 seq_printf(m, "Master IER:\t%08x\n", 741 I915_READ(VLV_MASTER_IER)); 742 743 seq_printf(m, "Render IER:\t%08x\n", 744 I915_READ(GTIER)); 745 seq_printf(m, "Render IIR:\t%08x\n", 746 I915_READ(GTIIR)); 747 seq_printf(m, "Render IMR:\t%08x\n", 748 I915_READ(GTIMR)); 749 750 seq_printf(m, "PM IER:\t\t%08x\n", 751 I915_READ(GEN6_PMIER)); 752 seq_printf(m, "PM IIR:\t\t%08x\n", 753 I915_READ(GEN6_PMIIR)); 754 seq_printf(m, "PM IMR:\t\t%08x\n", 755 I915_READ(GEN6_PMIMR)); 756 757 seq_printf(m, "Port hotplug:\t%08x\n", 758 I915_READ(PORT_HOTPLUG_EN)); 759 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 760 I915_READ(VLV_DPFLIPSTAT)); 761 seq_printf(m, "DPINVGTT:\t%08x\n", 762 I915_READ(DPINVGTT)); 763 764 } else if (!HAS_PCH_SPLIT(dev)) { 765 seq_printf(m, "Interrupt enable: %08x\n", 766 I915_READ(IER)); 767 seq_printf(m, "Interrupt identity: %08x\n", 768 I915_READ(IIR)); 769 seq_printf(m, "Interrupt mask: %08x\n", 770 I915_READ(IMR)); 771 for_each_pipe(pipe) 772 seq_printf(m, "Pipe %c stat: %08x\n", 773 pipe_name(pipe), 774 I915_READ(PIPESTAT(pipe))); 775 } else { 776 seq_printf(m, "North Display Interrupt enable: %08x\n", 777 I915_READ(DEIER)); 778 seq_printf(m, "North Display Interrupt identity: %08x\n", 779 I915_READ(DEIIR)); 780 seq_printf(m, "North Display Interrupt mask: %08x\n", 781 I915_READ(DEIMR)); 782 seq_printf(m, "South Display Interrupt enable: %08x\n", 783 I915_READ(SDEIER)); 784 seq_printf(m, "South Display Interrupt identity: %08x\n", 785 I915_READ(SDEIIR)); 786 seq_printf(m, "South Display Interrupt mask: %08x\n", 787 I915_READ(SDEIMR)); 788 seq_printf(m, "Graphics Interrupt enable: %08x\n", 789 I915_READ(GTIER)); 790 seq_printf(m, "Graphics Interrupt identity: %08x\n", 791 I915_READ(GTIIR)); 792 seq_printf(m, "Graphics Interrupt mask: %08x\n", 793 I915_READ(GTIMR)); 794 } 795 for_each_ring(ring, dev_priv, i) { 796 if (INTEL_INFO(dev)->gen >= 6) { 797 seq_printf(m, 798 "Graphics Interrupt mask (%s): %08x\n", 799 ring->name, I915_READ_IMR(ring)); 800 } 801 i915_ring_seqno_info(m, ring); 802 } 803 intel_runtime_pm_put(dev_priv); 804 mutex_unlock(&dev->struct_mutex); 805 806 return 0; 807 } 808 809 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 810 { 811 struct drm_info_node *node = m->private; 812 struct drm_device *dev = node->minor->dev; 813 struct drm_i915_private *dev_priv = dev->dev_private; 814 int i, ret; 815 816 ret = mutex_lock_interruptible(&dev->struct_mutex); 817 if (ret) 818 return ret; 819 820 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 821 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 822 for (i = 0; i < dev_priv->num_fence_regs; i++) { 823 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 824 825 seq_printf(m, "Fence %d, pin count = %d, object = ", 826 i, dev_priv->fence_regs[i].pin_count); 827 if (obj == NULL) 828 seq_puts(m, "unused"); 829 else 830 describe_obj(m, obj); 831 seq_putc(m, '\n'); 832 } 833 834 mutex_unlock(&dev->struct_mutex); 835 return 0; 836 } 837 838 static int i915_hws_info(struct seq_file *m, void *data) 839 { 840 struct drm_info_node *node = m->private; 841 struct drm_device *dev = node->minor->dev; 842 struct drm_i915_private *dev_priv = dev->dev_private; 843 struct intel_engine_cs *ring; 844 const u32 *hws; 845 int i; 846 847 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 848 hws = ring->status_page.page_addr; 849 if (hws == NULL) 850 return 0; 851 852 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 853 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 854 i * 4, 855 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 856 } 857 return 0; 858 } 859 860 static ssize_t 861 i915_error_state_write(struct file *filp, 862 const char __user *ubuf, 863 size_t cnt, 864 loff_t *ppos) 865 { 866 struct i915_error_state_file_priv *error_priv = filp->private_data; 867 struct drm_device *dev = error_priv->dev; 868 int ret; 869 870 DRM_DEBUG_DRIVER("Resetting error state\n"); 871 872 ret = mutex_lock_interruptible(&dev->struct_mutex); 873 if (ret) 874 return ret; 875 876 i915_destroy_error_state(dev); 877 mutex_unlock(&dev->struct_mutex); 878 879 return cnt; 880 } 881 882 static int i915_error_state_open(struct inode *inode, struct file *file) 883 { 884 struct drm_device *dev = inode->i_private; 885 struct i915_error_state_file_priv *error_priv; 886 887 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); 888 if (!error_priv) 889 return -ENOMEM; 890 891 error_priv->dev = dev; 892 893 i915_error_state_get(dev, error_priv); 894 895 file->private_data = error_priv; 896 897 return 0; 898 } 899 900 static int i915_error_state_release(struct inode *inode, struct file *file) 901 { 902 struct i915_error_state_file_priv *error_priv = file->private_data; 903 904 i915_error_state_put(error_priv); 905 kfree(error_priv); 906 907 return 0; 908 } 909 910 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, 911 size_t count, loff_t *pos) 912 { 913 struct i915_error_state_file_priv *error_priv = file->private_data; 914 struct drm_i915_error_state_buf error_str; 915 loff_t tmp_pos = 0; 916 ssize_t ret_count = 0; 917 int ret; 918 919 ret = i915_error_state_buf_init(&error_str, count, *pos); 920 if (ret) 921 return ret; 922 923 ret = i915_error_state_to_str(&error_str, error_priv); 924 if (ret) 925 goto out; 926 927 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos, 928 error_str.buf, 929 error_str.bytes); 930 931 if (ret_count < 0) 932 ret = ret_count; 933 else 934 *pos = error_str.start + ret_count; 935 out: 936 i915_error_state_buf_release(&error_str); 937 return ret ?: ret_count; 938 } 939 940 static const struct file_operations i915_error_state_fops = { 941 .owner = THIS_MODULE, 942 .open = i915_error_state_open, 943 .read = i915_error_state_read, 944 .write = i915_error_state_write, 945 .llseek = default_llseek, 946 .release = i915_error_state_release, 947 }; 948 949 static int 950 i915_next_seqno_get(void *data, u64 *val) 951 { 952 struct drm_device *dev = data; 953 struct drm_i915_private *dev_priv = dev->dev_private; 954 int ret; 955 956 ret = mutex_lock_interruptible(&dev->struct_mutex); 957 if (ret) 958 return ret; 959 960 *val = dev_priv->next_seqno; 961 mutex_unlock(&dev->struct_mutex); 962 963 return 0; 964 } 965 966 static int 967 i915_next_seqno_set(void *data, u64 val) 968 { 969 struct drm_device *dev = data; 970 int ret; 971 972 ret = mutex_lock_interruptible(&dev->struct_mutex); 973 if (ret) 974 return ret; 975 976 ret = i915_gem_set_seqno(dev, val); 977 mutex_unlock(&dev->struct_mutex); 978 979 return ret; 980 } 981 982 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, 983 i915_next_seqno_get, i915_next_seqno_set, 984 "0x%llx\n"); 985 986 static int i915_rstdby_delays(struct seq_file *m, void *unused) 987 { 988 struct drm_info_node *node = m->private; 989 struct drm_device *dev = node->minor->dev; 990 struct drm_i915_private *dev_priv = dev->dev_private; 991 u16 crstanddelay; 992 int ret; 993 994 ret = mutex_lock_interruptible(&dev->struct_mutex); 995 if (ret) 996 return ret; 997 intel_runtime_pm_get(dev_priv); 998 999 crstanddelay = I915_READ16(CRSTANDVID); 1000 1001 intel_runtime_pm_put(dev_priv); 1002 mutex_unlock(&dev->struct_mutex); 1003 1004 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 1005 1006 return 0; 1007 } 1008 1009 static int i915_frequency_info(struct seq_file *m, void *unused) 1010 { 1011 struct drm_info_node *node = m->private; 1012 struct drm_device *dev = node->minor->dev; 1013 struct drm_i915_private *dev_priv = dev->dev_private; 1014 int ret = 0; 1015 1016 intel_runtime_pm_get(dev_priv); 1017 1018 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1019 1020 if (IS_GEN5(dev)) { 1021 u16 rgvswctl = I915_READ16(MEMSWCTL); 1022 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 1023 1024 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 1025 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 1026 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 1027 MEMSTAT_VID_SHIFT); 1028 seq_printf(m, "Current P-state: %d\n", 1029 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 1030 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { 1031 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 1032 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 1033 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 1034 u32 rpmodectl, rpinclimit, rpdeclimit; 1035 u32 rpstat, cagf, reqf; 1036 u32 rpupei, rpcurup, rpprevup; 1037 u32 rpdownei, rpcurdown, rpprevdown; 1038 int max_freq; 1039 1040 /* RPSTAT1 is in the GT power well */ 1041 ret = mutex_lock_interruptible(&dev->struct_mutex); 1042 if (ret) 1043 goto out; 1044 1045 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 1046 1047 reqf = I915_READ(GEN6_RPNSWREQ); 1048 reqf &= ~GEN6_TURBO_DISABLE; 1049 if (IS_HASWELL(dev)) 1050 reqf >>= 24; 1051 else 1052 reqf >>= 25; 1053 reqf *= GT_FREQUENCY_MULTIPLIER; 1054 1055 rpmodectl = I915_READ(GEN6_RP_CONTROL); 1056 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); 1057 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); 1058 1059 rpstat = I915_READ(GEN6_RPSTAT1); 1060 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 1061 rpcurup = I915_READ(GEN6_RP_CUR_UP); 1062 rpprevup = I915_READ(GEN6_RP_PREV_UP); 1063 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 1064 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 1065 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 1066 if (IS_HASWELL(dev)) 1067 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 1068 else 1069 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 1070 cagf *= GT_FREQUENCY_MULTIPLIER; 1071 1072 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 1073 mutex_unlock(&dev->struct_mutex); 1074 1075 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1076 I915_READ(GEN6_PMIER), 1077 I915_READ(GEN6_PMIMR), 1078 I915_READ(GEN6_PMISR), 1079 I915_READ(GEN6_PMIIR), 1080 I915_READ(GEN6_PMINTRMSK)); 1081 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1082 seq_printf(m, "Render p-state ratio: %d\n", 1083 (gt_perf_status & 0xff00) >> 8); 1084 seq_printf(m, "Render p-state VID: %d\n", 1085 gt_perf_status & 0xff); 1086 seq_printf(m, "Render p-state limit: %d\n", 1087 rp_state_limits & 0xff); 1088 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 1089 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); 1090 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); 1091 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 1092 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 1093 seq_printf(m, "CAGF: %dMHz\n", cagf); 1094 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 1095 GEN6_CURICONT_MASK); 1096 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 1097 GEN6_CURBSYTAVG_MASK); 1098 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 1099 GEN6_CURBSYTAVG_MASK); 1100 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 1101 GEN6_CURIAVG_MASK); 1102 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 1103 GEN6_CURBSYTAVG_MASK); 1104 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 1105 GEN6_CURBSYTAVG_MASK); 1106 1107 max_freq = (rp_state_cap & 0xff0000) >> 16; 1108 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1109 max_freq * GT_FREQUENCY_MULTIPLIER); 1110 1111 max_freq = (rp_state_cap & 0xff00) >> 8; 1112 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1113 max_freq * GT_FREQUENCY_MULTIPLIER); 1114 1115 max_freq = rp_state_cap & 0xff; 1116 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1117 max_freq * GT_FREQUENCY_MULTIPLIER); 1118 1119 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1120 dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER); 1121 } else if (IS_VALLEYVIEW(dev)) { 1122 u32 freq_sts, val; 1123 1124 mutex_lock(&dev_priv->rps.hw_lock); 1125 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 1126 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 1127 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 1128 1129 val = valleyview_rps_max_freq(dev_priv); 1130 seq_printf(m, "max GPU freq: %d MHz\n", 1131 vlv_gpu_freq(dev_priv, val)); 1132 1133 val = valleyview_rps_min_freq(dev_priv); 1134 seq_printf(m, "min GPU freq: %d MHz\n", 1135 vlv_gpu_freq(dev_priv, val)); 1136 1137 seq_printf(m, "current GPU freq: %d MHz\n", 1138 vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); 1139 mutex_unlock(&dev_priv->rps.hw_lock); 1140 } else { 1141 seq_puts(m, "no P-state info available\n"); 1142 } 1143 1144 out: 1145 intel_runtime_pm_put(dev_priv); 1146 return ret; 1147 } 1148 1149 static int i915_delayfreq_table(struct seq_file *m, void *unused) 1150 { 1151 struct drm_info_node *node = m->private; 1152 struct drm_device *dev = node->minor->dev; 1153 struct drm_i915_private *dev_priv = dev->dev_private; 1154 u32 delayfreq; 1155 int ret, i; 1156 1157 ret = mutex_lock_interruptible(&dev->struct_mutex); 1158 if (ret) 1159 return ret; 1160 intel_runtime_pm_get(dev_priv); 1161 1162 for (i = 0; i < 16; i++) { 1163 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 1164 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 1165 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 1166 } 1167 1168 intel_runtime_pm_put(dev_priv); 1169 1170 mutex_unlock(&dev->struct_mutex); 1171 1172 return 0; 1173 } 1174 1175 static inline int MAP_TO_MV(int map) 1176 { 1177 return 1250 - (map * 25); 1178 } 1179 1180 static int i915_inttoext_table(struct seq_file *m, void *unused) 1181 { 1182 struct drm_info_node *node = m->private; 1183 struct drm_device *dev = node->minor->dev; 1184 struct drm_i915_private *dev_priv = dev->dev_private; 1185 u32 inttoext; 1186 int ret, i; 1187 1188 ret = mutex_lock_interruptible(&dev->struct_mutex); 1189 if (ret) 1190 return ret; 1191 intel_runtime_pm_get(dev_priv); 1192 1193 for (i = 1; i <= 32; i++) { 1194 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 1195 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 1196 } 1197 1198 intel_runtime_pm_put(dev_priv); 1199 mutex_unlock(&dev->struct_mutex); 1200 1201 return 0; 1202 } 1203 1204 static int ironlake_drpc_info(struct seq_file *m) 1205 { 1206 struct drm_info_node *node = m->private; 1207 struct drm_device *dev = node->minor->dev; 1208 struct drm_i915_private *dev_priv = dev->dev_private; 1209 u32 rgvmodectl, rstdbyctl; 1210 u16 crstandvid; 1211 int ret; 1212 1213 ret = mutex_lock_interruptible(&dev->struct_mutex); 1214 if (ret) 1215 return ret; 1216 intel_runtime_pm_get(dev_priv); 1217 1218 rgvmodectl = I915_READ(MEMMODECTL); 1219 rstdbyctl = I915_READ(RSTDBYCTL); 1220 crstandvid = I915_READ16(CRSTANDVID); 1221 1222 intel_runtime_pm_put(dev_priv); 1223 mutex_unlock(&dev->struct_mutex); 1224 1225 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1226 "yes" : "no"); 1227 seq_printf(m, "Boost freq: %d\n", 1228 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1229 MEMMODE_BOOST_FREQ_SHIFT); 1230 seq_printf(m, "HW control enabled: %s\n", 1231 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 1232 seq_printf(m, "SW control enabled: %s\n", 1233 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 1234 seq_printf(m, "Gated voltage change: %s\n", 1235 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 1236 seq_printf(m, "Starting frequency: P%d\n", 1237 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1238 seq_printf(m, "Max P-state: P%d\n", 1239 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1240 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1241 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1242 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1243 seq_printf(m, "Render standby enabled: %s\n", 1244 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1245 seq_puts(m, "Current RS state: "); 1246 switch (rstdbyctl & RSX_STATUS_MASK) { 1247 case RSX_STATUS_ON: 1248 seq_puts(m, "on\n"); 1249 break; 1250 case RSX_STATUS_RC1: 1251 seq_puts(m, "RC1\n"); 1252 break; 1253 case RSX_STATUS_RC1E: 1254 seq_puts(m, "RC1E\n"); 1255 break; 1256 case RSX_STATUS_RS1: 1257 seq_puts(m, "RS1\n"); 1258 break; 1259 case RSX_STATUS_RS2: 1260 seq_puts(m, "RS2 (RC6)\n"); 1261 break; 1262 case RSX_STATUS_RS3: 1263 seq_puts(m, "RC3 (RC6+)\n"); 1264 break; 1265 default: 1266 seq_puts(m, "unknown\n"); 1267 break; 1268 } 1269 1270 return 0; 1271 } 1272 1273 static int vlv_drpc_info(struct seq_file *m) 1274 { 1275 1276 struct drm_info_node *node = m->private; 1277 struct drm_device *dev = node->minor->dev; 1278 struct drm_i915_private *dev_priv = dev->dev_private; 1279 u32 rpmodectl1, rcctl1; 1280 unsigned fw_rendercount = 0, fw_mediacount = 0; 1281 1282 intel_runtime_pm_get(dev_priv); 1283 1284 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1285 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1286 1287 intel_runtime_pm_put(dev_priv); 1288 1289 seq_printf(m, "Video Turbo Mode: %s\n", 1290 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1291 seq_printf(m, "Turbo enabled: %s\n", 1292 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1293 seq_printf(m, "HW control enabled: %s\n", 1294 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1295 seq_printf(m, "SW control enabled: %s\n", 1296 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1297 GEN6_RP_MEDIA_SW_MODE)); 1298 seq_printf(m, "RC6 Enabled: %s\n", 1299 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | 1300 GEN6_RC_CTL_EI_MODE(1)))); 1301 seq_printf(m, "Render Power Well: %s\n", 1302 (I915_READ(VLV_GTLC_PW_STATUS) & 1303 VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); 1304 seq_printf(m, "Media Power Well: %s\n", 1305 (I915_READ(VLV_GTLC_PW_STATUS) & 1306 VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down"); 1307 1308 seq_printf(m, "Render RC6 residency since boot: %u\n", 1309 I915_READ(VLV_GT_RENDER_RC6)); 1310 seq_printf(m, "Media RC6 residency since boot: %u\n", 1311 I915_READ(VLV_GT_MEDIA_RC6)); 1312 1313 spin_lock_irq(&dev_priv->uncore.lock); 1314 fw_rendercount = dev_priv->uncore.fw_rendercount; 1315 fw_mediacount = dev_priv->uncore.fw_mediacount; 1316 spin_unlock_irq(&dev_priv->uncore.lock); 1317 1318 seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount); 1319 seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount); 1320 1321 1322 return 0; 1323 } 1324 1325 1326 static int gen6_drpc_info(struct seq_file *m) 1327 { 1328 1329 struct drm_info_node *node = m->private; 1330 struct drm_device *dev = node->minor->dev; 1331 struct drm_i915_private *dev_priv = dev->dev_private; 1332 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1333 unsigned forcewake_count; 1334 int count = 0, ret; 1335 1336 ret = mutex_lock_interruptible(&dev->struct_mutex); 1337 if (ret) 1338 return ret; 1339 intel_runtime_pm_get(dev_priv); 1340 1341 spin_lock_irq(&dev_priv->uncore.lock); 1342 forcewake_count = dev_priv->uncore.forcewake_count; 1343 spin_unlock_irq(&dev_priv->uncore.lock); 1344 1345 if (forcewake_count) { 1346 seq_puts(m, "RC information inaccurate because somebody " 1347 "holds a forcewake reference \n"); 1348 } else { 1349 /* NB: we cannot use forcewake, else we read the wrong values */ 1350 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1351 udelay(10); 1352 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1353 } 1354 1355 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1356 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1357 1358 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1359 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1360 mutex_unlock(&dev->struct_mutex); 1361 mutex_lock(&dev_priv->rps.hw_lock); 1362 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1363 mutex_unlock(&dev_priv->rps.hw_lock); 1364 1365 intel_runtime_pm_put(dev_priv); 1366 1367 seq_printf(m, "Video Turbo Mode: %s\n", 1368 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1369 seq_printf(m, "HW control enabled: %s\n", 1370 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1371 seq_printf(m, "SW control enabled: %s\n", 1372 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1373 GEN6_RP_MEDIA_SW_MODE)); 1374 seq_printf(m, "RC1e Enabled: %s\n", 1375 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1376 seq_printf(m, "RC6 Enabled: %s\n", 1377 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1378 seq_printf(m, "Deep RC6 Enabled: %s\n", 1379 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1380 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1381 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1382 seq_puts(m, "Current RC state: "); 1383 switch (gt_core_status & GEN6_RCn_MASK) { 1384 case GEN6_RC0: 1385 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1386 seq_puts(m, "Core Power Down\n"); 1387 else 1388 seq_puts(m, "on\n"); 1389 break; 1390 case GEN6_RC3: 1391 seq_puts(m, "RC3\n"); 1392 break; 1393 case GEN6_RC6: 1394 seq_puts(m, "RC6\n"); 1395 break; 1396 case GEN6_RC7: 1397 seq_puts(m, "RC7\n"); 1398 break; 1399 default: 1400 seq_puts(m, "Unknown\n"); 1401 break; 1402 } 1403 1404 seq_printf(m, "Core Power Down: %s\n", 1405 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1406 1407 /* Not exactly sure what this is */ 1408 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1409 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1410 seq_printf(m, "RC6 residency since boot: %u\n", 1411 I915_READ(GEN6_GT_GFX_RC6)); 1412 seq_printf(m, "RC6+ residency since boot: %u\n", 1413 I915_READ(GEN6_GT_GFX_RC6p)); 1414 seq_printf(m, "RC6++ residency since boot: %u\n", 1415 I915_READ(GEN6_GT_GFX_RC6pp)); 1416 1417 seq_printf(m, "RC6 voltage: %dmV\n", 1418 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1419 seq_printf(m, "RC6+ voltage: %dmV\n", 1420 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1421 seq_printf(m, "RC6++ voltage: %dmV\n", 1422 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1423 return 0; 1424 } 1425 1426 static int i915_drpc_info(struct seq_file *m, void *unused) 1427 { 1428 struct drm_info_node *node = m->private; 1429 struct drm_device *dev = node->minor->dev; 1430 1431 if (IS_VALLEYVIEW(dev)) 1432 return vlv_drpc_info(m); 1433 else if (IS_GEN6(dev) || IS_GEN7(dev)) 1434 return gen6_drpc_info(m); 1435 else 1436 return ironlake_drpc_info(m); 1437 } 1438 1439 static int i915_fbc_status(struct seq_file *m, void *unused) 1440 { 1441 struct drm_info_node *node = m->private; 1442 struct drm_device *dev = node->minor->dev; 1443 struct drm_i915_private *dev_priv = dev->dev_private; 1444 1445 if (!HAS_FBC(dev)) { 1446 seq_puts(m, "FBC unsupported on this chipset\n"); 1447 return 0; 1448 } 1449 1450 intel_runtime_pm_get(dev_priv); 1451 1452 if (intel_fbc_enabled(dev)) { 1453 seq_puts(m, "FBC enabled\n"); 1454 } else { 1455 seq_puts(m, "FBC disabled: "); 1456 switch (dev_priv->fbc.no_fbc_reason) { 1457 case FBC_OK: 1458 seq_puts(m, "FBC actived, but currently disabled in hardware"); 1459 break; 1460 case FBC_UNSUPPORTED: 1461 seq_puts(m, "unsupported by this chipset"); 1462 break; 1463 case FBC_NO_OUTPUT: 1464 seq_puts(m, "no outputs"); 1465 break; 1466 case FBC_STOLEN_TOO_SMALL: 1467 seq_puts(m, "not enough stolen memory"); 1468 break; 1469 case FBC_UNSUPPORTED_MODE: 1470 seq_puts(m, "mode not supported"); 1471 break; 1472 case FBC_MODE_TOO_LARGE: 1473 seq_puts(m, "mode too large"); 1474 break; 1475 case FBC_BAD_PLANE: 1476 seq_puts(m, "FBC unsupported on plane"); 1477 break; 1478 case FBC_NOT_TILED: 1479 seq_puts(m, "scanout buffer not tiled"); 1480 break; 1481 case FBC_MULTIPLE_PIPES: 1482 seq_puts(m, "multiple pipes are enabled"); 1483 break; 1484 case FBC_MODULE_PARAM: 1485 seq_puts(m, "disabled per module param (default off)"); 1486 break; 1487 case FBC_CHIP_DEFAULT: 1488 seq_puts(m, "disabled per chip default"); 1489 break; 1490 default: 1491 seq_puts(m, "unknown reason"); 1492 } 1493 seq_putc(m, '\n'); 1494 } 1495 1496 intel_runtime_pm_put(dev_priv); 1497 1498 return 0; 1499 } 1500 1501 static int i915_ips_status(struct seq_file *m, void *unused) 1502 { 1503 struct drm_info_node *node = m->private; 1504 struct drm_device *dev = node->minor->dev; 1505 struct drm_i915_private *dev_priv = dev->dev_private; 1506 1507 if (!HAS_IPS(dev)) { 1508 seq_puts(m, "not supported\n"); 1509 return 0; 1510 } 1511 1512 intel_runtime_pm_get(dev_priv); 1513 1514 if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE) 1515 seq_puts(m, "enabled\n"); 1516 else 1517 seq_puts(m, "disabled\n"); 1518 1519 intel_runtime_pm_put(dev_priv); 1520 1521 return 0; 1522 } 1523 1524 static int i915_sr_status(struct seq_file *m, void *unused) 1525 { 1526 struct drm_info_node *node = m->private; 1527 struct drm_device *dev = node->minor->dev; 1528 struct drm_i915_private *dev_priv = dev->dev_private; 1529 bool sr_enabled = false; 1530 1531 intel_runtime_pm_get(dev_priv); 1532 1533 if (HAS_PCH_SPLIT(dev)) 1534 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1535 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1536 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1537 else if (IS_I915GM(dev)) 1538 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1539 else if (IS_PINEVIEW(dev)) 1540 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1541 1542 intel_runtime_pm_put(dev_priv); 1543 1544 seq_printf(m, "self-refresh: %s\n", 1545 sr_enabled ? "enabled" : "disabled"); 1546 1547 return 0; 1548 } 1549 1550 static int i915_emon_status(struct seq_file *m, void *unused) 1551 { 1552 struct drm_info_node *node = m->private; 1553 struct drm_device *dev = node->minor->dev; 1554 struct drm_i915_private *dev_priv = dev->dev_private; 1555 unsigned long temp, chipset, gfx; 1556 int ret; 1557 1558 if (!IS_GEN5(dev)) 1559 return -ENODEV; 1560 1561 ret = mutex_lock_interruptible(&dev->struct_mutex); 1562 if (ret) 1563 return ret; 1564 1565 temp = i915_mch_val(dev_priv); 1566 chipset = i915_chipset_val(dev_priv); 1567 gfx = i915_gfx_val(dev_priv); 1568 mutex_unlock(&dev->struct_mutex); 1569 1570 seq_printf(m, "GMCH temp: %ld\n", temp); 1571 seq_printf(m, "Chipset power: %ld\n", chipset); 1572 seq_printf(m, "GFX power: %ld\n", gfx); 1573 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1574 1575 return 0; 1576 } 1577 1578 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1579 { 1580 struct drm_info_node *node = m->private; 1581 struct drm_device *dev = node->minor->dev; 1582 struct drm_i915_private *dev_priv = dev->dev_private; 1583 int ret = 0; 1584 int gpu_freq, ia_freq; 1585 1586 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1587 seq_puts(m, "unsupported on this chipset\n"); 1588 return 0; 1589 } 1590 1591 intel_runtime_pm_get(dev_priv); 1592 1593 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1594 1595 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1596 if (ret) 1597 goto out; 1598 1599 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1600 1601 for (gpu_freq = dev_priv->rps.min_freq_softlimit; 1602 gpu_freq <= dev_priv->rps.max_freq_softlimit; 1603 gpu_freq++) { 1604 ia_freq = gpu_freq; 1605 sandybridge_pcode_read(dev_priv, 1606 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1607 &ia_freq); 1608 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1609 gpu_freq * GT_FREQUENCY_MULTIPLIER, 1610 ((ia_freq >> 0) & 0xff) * 100, 1611 ((ia_freq >> 8) & 0xff) * 100); 1612 } 1613 1614 mutex_unlock(&dev_priv->rps.hw_lock); 1615 1616 out: 1617 intel_runtime_pm_put(dev_priv); 1618 return ret; 1619 } 1620 1621 static int i915_gfxec(struct seq_file *m, void *unused) 1622 { 1623 struct drm_info_node *node = m->private; 1624 struct drm_device *dev = node->minor->dev; 1625 struct drm_i915_private *dev_priv = dev->dev_private; 1626 int ret; 1627 1628 ret = mutex_lock_interruptible(&dev->struct_mutex); 1629 if (ret) 1630 return ret; 1631 intel_runtime_pm_get(dev_priv); 1632 1633 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1634 intel_runtime_pm_put(dev_priv); 1635 1636 mutex_unlock(&dev->struct_mutex); 1637 1638 return 0; 1639 } 1640 1641 static int i915_opregion(struct seq_file *m, void *unused) 1642 { 1643 struct drm_info_node *node = m->private; 1644 struct drm_device *dev = node->minor->dev; 1645 struct drm_i915_private *dev_priv = dev->dev_private; 1646 struct intel_opregion *opregion = &dev_priv->opregion; 1647 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); 1648 int ret; 1649 1650 if (data == NULL) 1651 return -ENOMEM; 1652 1653 ret = mutex_lock_interruptible(&dev->struct_mutex); 1654 if (ret) 1655 goto out; 1656 1657 if (opregion->header) { 1658 memcpy_fromio(data, opregion->header, OPREGION_SIZE); 1659 seq_write(m, data, OPREGION_SIZE); 1660 } 1661 1662 mutex_unlock(&dev->struct_mutex); 1663 1664 out: 1665 kfree(data); 1666 return 0; 1667 } 1668 1669 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1670 { 1671 struct drm_info_node *node = m->private; 1672 struct drm_device *dev = node->minor->dev; 1673 struct intel_fbdev *ifbdev = NULL; 1674 struct intel_framebuffer *fb; 1675 1676 #ifdef CONFIG_DRM_I915_FBDEV 1677 struct drm_i915_private *dev_priv = dev->dev_private; 1678 int ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1679 if (ret) 1680 return ret; 1681 1682 ifbdev = dev_priv->fbdev; 1683 fb = to_intel_framebuffer(ifbdev->helper.fb); 1684 1685 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1686 fb->base.width, 1687 fb->base.height, 1688 fb->base.depth, 1689 fb->base.bits_per_pixel, 1690 atomic_read(&fb->base.refcount.refcount)); 1691 describe_obj(m, fb->obj); 1692 seq_putc(m, '\n'); 1693 mutex_unlock(&dev->mode_config.mutex); 1694 #endif 1695 1696 mutex_lock(&dev->mode_config.fb_lock); 1697 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1698 if (ifbdev && &fb->base == ifbdev->helper.fb) 1699 continue; 1700 1701 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ", 1702 fb->base.width, 1703 fb->base.height, 1704 fb->base.depth, 1705 fb->base.bits_per_pixel, 1706 atomic_read(&fb->base.refcount.refcount)); 1707 describe_obj(m, fb->obj); 1708 seq_putc(m, '\n'); 1709 } 1710 mutex_unlock(&dev->mode_config.fb_lock); 1711 1712 return 0; 1713 } 1714 1715 static int i915_context_status(struct seq_file *m, void *unused) 1716 { 1717 struct drm_info_node *node = m->private; 1718 struct drm_device *dev = node->minor->dev; 1719 struct drm_i915_private *dev_priv = dev->dev_private; 1720 struct intel_engine_cs *ring; 1721 struct intel_context *ctx; 1722 int ret, i; 1723 1724 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1725 if (ret) 1726 return ret; 1727 1728 if (dev_priv->ips.pwrctx) { 1729 seq_puts(m, "power context "); 1730 describe_obj(m, dev_priv->ips.pwrctx); 1731 seq_putc(m, '\n'); 1732 } 1733 1734 if (dev_priv->ips.renderctx) { 1735 seq_puts(m, "render context "); 1736 describe_obj(m, dev_priv->ips.renderctx); 1737 seq_putc(m, '\n'); 1738 } 1739 1740 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1741 if (ctx->obj == NULL) 1742 continue; 1743 1744 seq_puts(m, "HW context "); 1745 describe_ctx(m, ctx); 1746 for_each_ring(ring, dev_priv, i) 1747 if (ring->default_context == ctx) 1748 seq_printf(m, "(default context %s) ", ring->name); 1749 1750 describe_obj(m, ctx->obj); 1751 seq_putc(m, '\n'); 1752 } 1753 1754 mutex_unlock(&dev->mode_config.mutex); 1755 1756 return 0; 1757 } 1758 1759 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) 1760 { 1761 struct drm_info_node *node = m->private; 1762 struct drm_device *dev = node->minor->dev; 1763 struct drm_i915_private *dev_priv = dev->dev_private; 1764 unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0; 1765 1766 spin_lock_irq(&dev_priv->uncore.lock); 1767 if (IS_VALLEYVIEW(dev)) { 1768 fw_rendercount = dev_priv->uncore.fw_rendercount; 1769 fw_mediacount = dev_priv->uncore.fw_mediacount; 1770 } else 1771 forcewake_count = dev_priv->uncore.forcewake_count; 1772 spin_unlock_irq(&dev_priv->uncore.lock); 1773 1774 if (IS_VALLEYVIEW(dev)) { 1775 seq_printf(m, "fw_rendercount = %u\n", fw_rendercount); 1776 seq_printf(m, "fw_mediacount = %u\n", fw_mediacount); 1777 } else 1778 seq_printf(m, "forcewake count = %u\n", forcewake_count); 1779 1780 return 0; 1781 } 1782 1783 static const char *swizzle_string(unsigned swizzle) 1784 { 1785 switch (swizzle) { 1786 case I915_BIT_6_SWIZZLE_NONE: 1787 return "none"; 1788 case I915_BIT_6_SWIZZLE_9: 1789 return "bit9"; 1790 case I915_BIT_6_SWIZZLE_9_10: 1791 return "bit9/bit10"; 1792 case I915_BIT_6_SWIZZLE_9_11: 1793 return "bit9/bit11"; 1794 case I915_BIT_6_SWIZZLE_9_10_11: 1795 return "bit9/bit10/bit11"; 1796 case I915_BIT_6_SWIZZLE_9_17: 1797 return "bit9/bit17"; 1798 case I915_BIT_6_SWIZZLE_9_10_17: 1799 return "bit9/bit10/bit17"; 1800 case I915_BIT_6_SWIZZLE_UNKNOWN: 1801 return "unknown"; 1802 } 1803 1804 return "bug"; 1805 } 1806 1807 static int i915_swizzle_info(struct seq_file *m, void *data) 1808 { 1809 struct drm_info_node *node = m->private; 1810 struct drm_device *dev = node->minor->dev; 1811 struct drm_i915_private *dev_priv = dev->dev_private; 1812 int ret; 1813 1814 ret = mutex_lock_interruptible(&dev->struct_mutex); 1815 if (ret) 1816 return ret; 1817 intel_runtime_pm_get(dev_priv); 1818 1819 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1820 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1821 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1822 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1823 1824 if (IS_GEN3(dev) || IS_GEN4(dev)) { 1825 seq_printf(m, "DDC = 0x%08x\n", 1826 I915_READ(DCC)); 1827 seq_printf(m, "C0DRB3 = 0x%04x\n", 1828 I915_READ16(C0DRB3)); 1829 seq_printf(m, "C1DRB3 = 0x%04x\n", 1830 I915_READ16(C1DRB3)); 1831 } else if (INTEL_INFO(dev)->gen >= 6) { 1832 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1833 I915_READ(MAD_DIMM_C0)); 1834 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1835 I915_READ(MAD_DIMM_C1)); 1836 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1837 I915_READ(MAD_DIMM_C2)); 1838 seq_printf(m, "TILECTL = 0x%08x\n", 1839 I915_READ(TILECTL)); 1840 if (IS_GEN8(dev)) 1841 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 1842 I915_READ(GAMTARBMODE)); 1843 else 1844 seq_printf(m, "ARB_MODE = 0x%08x\n", 1845 I915_READ(ARB_MODE)); 1846 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1847 I915_READ(DISP_ARB_CTL)); 1848 } 1849 intel_runtime_pm_put(dev_priv); 1850 mutex_unlock(&dev->struct_mutex); 1851 1852 return 0; 1853 } 1854 1855 static int per_file_ctx(int id, void *ptr, void *data) 1856 { 1857 struct intel_context *ctx = ptr; 1858 struct seq_file *m = data; 1859 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx); 1860 1861 if (i915_gem_context_is_default(ctx)) 1862 seq_puts(m, " default context:\n"); 1863 else 1864 seq_printf(m, " context %d:\n", ctx->id); 1865 ppgtt->debug_dump(ppgtt, m); 1866 1867 return 0; 1868 } 1869 1870 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) 1871 { 1872 struct drm_i915_private *dev_priv = dev->dev_private; 1873 struct intel_engine_cs *ring; 1874 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1875 int unused, i; 1876 1877 if (!ppgtt) 1878 return; 1879 1880 seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages); 1881 seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries); 1882 for_each_ring(ring, dev_priv, unused) { 1883 seq_printf(m, "%s\n", ring->name); 1884 for (i = 0; i < 4; i++) { 1885 u32 offset = 0x270 + i * 8; 1886 u64 pdp = I915_READ(ring->mmio_base + offset + 4); 1887 pdp <<= 32; 1888 pdp |= I915_READ(ring->mmio_base + offset); 1889 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 1890 } 1891 } 1892 } 1893 1894 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) 1895 { 1896 struct drm_i915_private *dev_priv = dev->dev_private; 1897 struct intel_engine_cs *ring; 1898 struct drm_file *file; 1899 int i; 1900 1901 if (INTEL_INFO(dev)->gen == 6) 1902 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 1903 1904 for_each_ring(ring, dev_priv, i) { 1905 seq_printf(m, "%s\n", ring->name); 1906 if (INTEL_INFO(dev)->gen == 7) 1907 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 1908 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 1909 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 1910 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 1911 } 1912 if (dev_priv->mm.aliasing_ppgtt) { 1913 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1914 1915 seq_puts(m, "aliasing PPGTT:\n"); 1916 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1917 1918 ppgtt->debug_dump(ppgtt, m); 1919 } else 1920 return; 1921 1922 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 1923 struct drm_i915_file_private *file_priv = file->driver_priv; 1924 1925 seq_printf(m, "proc: %s\n", 1926 get_pid_task(file->pid, PIDTYPE_PID)->comm); 1927 idr_for_each(&file_priv->context_idr, per_file_ctx, m); 1928 } 1929 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1930 } 1931 1932 static int i915_ppgtt_info(struct seq_file *m, void *data) 1933 { 1934 struct drm_info_node *node = m->private; 1935 struct drm_device *dev = node->minor->dev; 1936 struct drm_i915_private *dev_priv = dev->dev_private; 1937 1938 int ret = mutex_lock_interruptible(&dev->struct_mutex); 1939 if (ret) 1940 return ret; 1941 intel_runtime_pm_get(dev_priv); 1942 1943 if (INTEL_INFO(dev)->gen >= 8) 1944 gen8_ppgtt_info(m, dev); 1945 else if (INTEL_INFO(dev)->gen >= 6) 1946 gen6_ppgtt_info(m, dev); 1947 1948 intel_runtime_pm_put(dev_priv); 1949 mutex_unlock(&dev->struct_mutex); 1950 1951 return 0; 1952 } 1953 1954 static int i915_llc(struct seq_file *m, void *data) 1955 { 1956 struct drm_info_node *node = m->private; 1957 struct drm_device *dev = node->minor->dev; 1958 struct drm_i915_private *dev_priv = dev->dev_private; 1959 1960 /* Size calculation for LLC is a bit of a pain. Ignore for now. */ 1961 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); 1962 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); 1963 1964 return 0; 1965 } 1966 1967 static int i915_edp_psr_status(struct seq_file *m, void *data) 1968 { 1969 struct drm_info_node *node = m->private; 1970 struct drm_device *dev = node->minor->dev; 1971 struct drm_i915_private *dev_priv = dev->dev_private; 1972 u32 psrperf = 0; 1973 bool enabled = false; 1974 1975 intel_runtime_pm_get(dev_priv); 1976 1977 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 1978 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 1979 1980 enabled = HAS_PSR(dev) && 1981 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; 1982 seq_printf(m, "Enabled: %s\n", yesno(enabled)); 1983 1984 if (HAS_PSR(dev)) 1985 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & 1986 EDP_PSR_PERF_CNT_MASK; 1987 seq_printf(m, "Performance_Counter: %u\n", psrperf); 1988 1989 intel_runtime_pm_put(dev_priv); 1990 return 0; 1991 } 1992 1993 static int i915_sink_crc(struct seq_file *m, void *data) 1994 { 1995 struct drm_info_node *node = m->private; 1996 struct drm_device *dev = node->minor->dev; 1997 struct intel_encoder *encoder; 1998 struct intel_connector *connector; 1999 struct intel_dp *intel_dp = NULL; 2000 int ret; 2001 u8 crc[6]; 2002 2003 drm_modeset_lock_all(dev); 2004 list_for_each_entry(connector, &dev->mode_config.connector_list, 2005 base.head) { 2006 2007 if (connector->base.dpms != DRM_MODE_DPMS_ON) 2008 continue; 2009 2010 if (!connector->base.encoder) 2011 continue; 2012 2013 encoder = to_intel_encoder(connector->base.encoder); 2014 if (encoder->type != INTEL_OUTPUT_EDP) 2015 continue; 2016 2017 intel_dp = enc_to_intel_dp(&encoder->base); 2018 2019 ret = intel_dp_sink_crc(intel_dp, crc); 2020 if (ret) 2021 goto out; 2022 2023 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2024 crc[0], crc[1], crc[2], 2025 crc[3], crc[4], crc[5]); 2026 goto out; 2027 } 2028 ret = -ENODEV; 2029 out: 2030 drm_modeset_unlock_all(dev); 2031 return ret; 2032 } 2033 2034 static int i915_energy_uJ(struct seq_file *m, void *data) 2035 { 2036 struct drm_info_node *node = m->private; 2037 struct drm_device *dev = node->minor->dev; 2038 struct drm_i915_private *dev_priv = dev->dev_private; 2039 u64 power; 2040 u32 units; 2041 2042 if (INTEL_INFO(dev)->gen < 6) 2043 return -ENODEV; 2044 2045 intel_runtime_pm_get(dev_priv); 2046 2047 rdmsrl(MSR_RAPL_POWER_UNIT, power); 2048 power = (power & 0x1f00) >> 8; 2049 units = 1000000 / (1 << power); /* convert to uJ */ 2050 power = I915_READ(MCH_SECP_NRG_STTS); 2051 power *= units; 2052 2053 intel_runtime_pm_put(dev_priv); 2054 2055 seq_printf(m, "%llu", (long long unsigned)power); 2056 2057 return 0; 2058 } 2059 2060 static int i915_pc8_status(struct seq_file *m, void *unused) 2061 { 2062 struct drm_info_node *node = m->private; 2063 struct drm_device *dev = node->minor->dev; 2064 struct drm_i915_private *dev_priv = dev->dev_private; 2065 2066 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { 2067 seq_puts(m, "not supported\n"); 2068 return 0; 2069 } 2070 2071 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); 2072 seq_printf(m, "IRQs disabled: %s\n", 2073 yesno(dev_priv->pm.irqs_disabled)); 2074 2075 return 0; 2076 } 2077 2078 static const char *power_domain_str(enum intel_display_power_domain domain) 2079 { 2080 switch (domain) { 2081 case POWER_DOMAIN_PIPE_A: 2082 return "PIPE_A"; 2083 case POWER_DOMAIN_PIPE_B: 2084 return "PIPE_B"; 2085 case POWER_DOMAIN_PIPE_C: 2086 return "PIPE_C"; 2087 case POWER_DOMAIN_PIPE_A_PANEL_FITTER: 2088 return "PIPE_A_PANEL_FITTER"; 2089 case POWER_DOMAIN_PIPE_B_PANEL_FITTER: 2090 return "PIPE_B_PANEL_FITTER"; 2091 case POWER_DOMAIN_PIPE_C_PANEL_FITTER: 2092 return "PIPE_C_PANEL_FITTER"; 2093 case POWER_DOMAIN_TRANSCODER_A: 2094 return "TRANSCODER_A"; 2095 case POWER_DOMAIN_TRANSCODER_B: 2096 return "TRANSCODER_B"; 2097 case POWER_DOMAIN_TRANSCODER_C: 2098 return "TRANSCODER_C"; 2099 case POWER_DOMAIN_TRANSCODER_EDP: 2100 return "TRANSCODER_EDP"; 2101 case POWER_DOMAIN_PORT_DDI_A_2_LANES: 2102 return "PORT_DDI_A_2_LANES"; 2103 case POWER_DOMAIN_PORT_DDI_A_4_LANES: 2104 return "PORT_DDI_A_4_LANES"; 2105 case POWER_DOMAIN_PORT_DDI_B_2_LANES: 2106 return "PORT_DDI_B_2_LANES"; 2107 case POWER_DOMAIN_PORT_DDI_B_4_LANES: 2108 return "PORT_DDI_B_4_LANES"; 2109 case POWER_DOMAIN_PORT_DDI_C_2_LANES: 2110 return "PORT_DDI_C_2_LANES"; 2111 case POWER_DOMAIN_PORT_DDI_C_4_LANES: 2112 return "PORT_DDI_C_4_LANES"; 2113 case POWER_DOMAIN_PORT_DDI_D_2_LANES: 2114 return "PORT_DDI_D_2_LANES"; 2115 case POWER_DOMAIN_PORT_DDI_D_4_LANES: 2116 return "PORT_DDI_D_4_LANES"; 2117 case POWER_DOMAIN_PORT_DSI: 2118 return "PORT_DSI"; 2119 case POWER_DOMAIN_PORT_CRT: 2120 return "PORT_CRT"; 2121 case POWER_DOMAIN_PORT_OTHER: 2122 return "PORT_OTHER"; 2123 case POWER_DOMAIN_VGA: 2124 return "VGA"; 2125 case POWER_DOMAIN_AUDIO: 2126 return "AUDIO"; 2127 case POWER_DOMAIN_INIT: 2128 return "INIT"; 2129 default: 2130 WARN_ON(1); 2131 return "?"; 2132 } 2133 } 2134 2135 static int i915_power_domain_info(struct seq_file *m, void *unused) 2136 { 2137 struct drm_info_node *node = m->private; 2138 struct drm_device *dev = node->minor->dev; 2139 struct drm_i915_private *dev_priv = dev->dev_private; 2140 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2141 int i; 2142 2143 mutex_lock(&power_domains->lock); 2144 2145 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2146 for (i = 0; i < power_domains->power_well_count; i++) { 2147 struct i915_power_well *power_well; 2148 enum intel_display_power_domain power_domain; 2149 2150 power_well = &power_domains->power_wells[i]; 2151 seq_printf(m, "%-25s %d\n", power_well->name, 2152 power_well->count); 2153 2154 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM; 2155 power_domain++) { 2156 if (!(BIT(power_domain) & power_well->domains)) 2157 continue; 2158 2159 seq_printf(m, " %-23s %d\n", 2160 power_domain_str(power_domain), 2161 power_domains->domain_use_count[power_domain]); 2162 } 2163 } 2164 2165 mutex_unlock(&power_domains->lock); 2166 2167 return 0; 2168 } 2169 2170 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2171 struct drm_display_mode *mode) 2172 { 2173 int i; 2174 2175 for (i = 0; i < tabs; i++) 2176 seq_putc(m, '\t'); 2177 2178 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2179 mode->base.id, mode->name, 2180 mode->vrefresh, mode->clock, 2181 mode->hdisplay, mode->hsync_start, 2182 mode->hsync_end, mode->htotal, 2183 mode->vdisplay, mode->vsync_start, 2184 mode->vsync_end, mode->vtotal, 2185 mode->type, mode->flags); 2186 } 2187 2188 static void intel_encoder_info(struct seq_file *m, 2189 struct intel_crtc *intel_crtc, 2190 struct intel_encoder *intel_encoder) 2191 { 2192 struct drm_info_node *node = m->private; 2193 struct drm_device *dev = node->minor->dev; 2194 struct drm_crtc *crtc = &intel_crtc->base; 2195 struct intel_connector *intel_connector; 2196 struct drm_encoder *encoder; 2197 2198 encoder = &intel_encoder->base; 2199 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2200 encoder->base.id, encoder->name); 2201 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2202 struct drm_connector *connector = &intel_connector->base; 2203 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2204 connector->base.id, 2205 connector->name, 2206 drm_get_connector_status_name(connector->status)); 2207 if (connector->status == connector_status_connected) { 2208 struct drm_display_mode *mode = &crtc->mode; 2209 seq_printf(m, ", mode:\n"); 2210 intel_seq_print_mode(m, 2, mode); 2211 } else { 2212 seq_putc(m, '\n'); 2213 } 2214 } 2215 } 2216 2217 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2218 { 2219 struct drm_info_node *node = m->private; 2220 struct drm_device *dev = node->minor->dev; 2221 struct drm_crtc *crtc = &intel_crtc->base; 2222 struct intel_encoder *intel_encoder; 2223 2224 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2225 crtc->primary->fb->base.id, crtc->x, crtc->y, 2226 crtc->primary->fb->width, crtc->primary->fb->height); 2227 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2228 intel_encoder_info(m, intel_crtc, intel_encoder); 2229 } 2230 2231 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2232 { 2233 struct drm_display_mode *mode = panel->fixed_mode; 2234 2235 seq_printf(m, "\tfixed mode:\n"); 2236 intel_seq_print_mode(m, 2, mode); 2237 } 2238 2239 static void intel_dp_info(struct seq_file *m, 2240 struct intel_connector *intel_connector) 2241 { 2242 struct intel_encoder *intel_encoder = intel_connector->encoder; 2243 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2244 2245 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2246 seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" : 2247 "no"); 2248 if (intel_encoder->type == INTEL_OUTPUT_EDP) 2249 intel_panel_info(m, &intel_connector->panel); 2250 } 2251 2252 static void intel_hdmi_info(struct seq_file *m, 2253 struct intel_connector *intel_connector) 2254 { 2255 struct intel_encoder *intel_encoder = intel_connector->encoder; 2256 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2257 2258 seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" : 2259 "no"); 2260 } 2261 2262 static void intel_lvds_info(struct seq_file *m, 2263 struct intel_connector *intel_connector) 2264 { 2265 intel_panel_info(m, &intel_connector->panel); 2266 } 2267 2268 static void intel_connector_info(struct seq_file *m, 2269 struct drm_connector *connector) 2270 { 2271 struct intel_connector *intel_connector = to_intel_connector(connector); 2272 struct intel_encoder *intel_encoder = intel_connector->encoder; 2273 struct drm_display_mode *mode; 2274 2275 seq_printf(m, "connector %d: type %s, status: %s\n", 2276 connector->base.id, connector->name, 2277 drm_get_connector_status_name(connector->status)); 2278 if (connector->status == connector_status_connected) { 2279 seq_printf(m, "\tname: %s\n", connector->display_info.name); 2280 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2281 connector->display_info.width_mm, 2282 connector->display_info.height_mm); 2283 seq_printf(m, "\tsubpixel order: %s\n", 2284 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2285 seq_printf(m, "\tCEA rev: %d\n", 2286 connector->display_info.cea_rev); 2287 } 2288 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2289 intel_encoder->type == INTEL_OUTPUT_EDP) 2290 intel_dp_info(m, intel_connector); 2291 else if (intel_encoder->type == INTEL_OUTPUT_HDMI) 2292 intel_hdmi_info(m, intel_connector); 2293 else if (intel_encoder->type == INTEL_OUTPUT_LVDS) 2294 intel_lvds_info(m, intel_connector); 2295 2296 seq_printf(m, "\tmodes:\n"); 2297 list_for_each_entry(mode, &connector->modes, head) 2298 intel_seq_print_mode(m, 2, mode); 2299 } 2300 2301 static bool cursor_active(struct drm_device *dev, int pipe) 2302 { 2303 struct drm_i915_private *dev_priv = dev->dev_private; 2304 u32 state; 2305 2306 if (IS_845G(dev) || IS_I865G(dev)) 2307 state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 2308 else 2309 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 2310 2311 return state; 2312 } 2313 2314 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) 2315 { 2316 struct drm_i915_private *dev_priv = dev->dev_private; 2317 u32 pos; 2318 2319 pos = I915_READ(CURPOS(pipe)); 2320 2321 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; 2322 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) 2323 *x = -*x; 2324 2325 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; 2326 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) 2327 *y = -*y; 2328 2329 return cursor_active(dev, pipe); 2330 } 2331 2332 static int i915_display_info(struct seq_file *m, void *unused) 2333 { 2334 struct drm_info_node *node = m->private; 2335 struct drm_device *dev = node->minor->dev; 2336 struct drm_i915_private *dev_priv = dev->dev_private; 2337 struct intel_crtc *crtc; 2338 struct drm_connector *connector; 2339 2340 intel_runtime_pm_get(dev_priv); 2341 drm_modeset_lock_all(dev); 2342 seq_printf(m, "CRTC info\n"); 2343 seq_printf(m, "---------\n"); 2344 for_each_intel_crtc(dev, crtc) { 2345 bool active; 2346 int x, y; 2347 2348 seq_printf(m, "CRTC %d: pipe: %c, active: %s\n", 2349 crtc->base.base.id, pipe_name(crtc->pipe), 2350 yesno(crtc->active)); 2351 if (crtc->active) { 2352 intel_crtc_info(m, crtc); 2353 2354 active = cursor_position(dev, crtc->pipe, &x, &y); 2355 seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n", 2356 yesno(crtc->cursor_base), 2357 x, y, crtc->cursor_addr, 2358 yesno(active)); 2359 } 2360 2361 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 2362 yesno(!crtc->cpu_fifo_underrun_disabled), 2363 yesno(!crtc->pch_fifo_underrun_disabled)); 2364 } 2365 2366 seq_printf(m, "\n"); 2367 seq_printf(m, "Connector info\n"); 2368 seq_printf(m, "--------------\n"); 2369 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2370 intel_connector_info(m, connector); 2371 } 2372 drm_modeset_unlock_all(dev); 2373 intel_runtime_pm_put(dev_priv); 2374 2375 return 0; 2376 } 2377 2378 struct pipe_crc_info { 2379 const char *name; 2380 struct drm_device *dev; 2381 enum pipe pipe; 2382 }; 2383 2384 static int i915_pipe_crc_open(struct inode *inode, struct file *filep) 2385 { 2386 struct pipe_crc_info *info = inode->i_private; 2387 struct drm_i915_private *dev_priv = info->dev->dev_private; 2388 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2389 2390 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) 2391 return -ENODEV; 2392 2393 spin_lock_irq(&pipe_crc->lock); 2394 2395 if (pipe_crc->opened) { 2396 spin_unlock_irq(&pipe_crc->lock); 2397 return -EBUSY; /* already open */ 2398 } 2399 2400 pipe_crc->opened = true; 2401 filep->private_data = inode->i_private; 2402 2403 spin_unlock_irq(&pipe_crc->lock); 2404 2405 return 0; 2406 } 2407 2408 static int i915_pipe_crc_release(struct inode *inode, struct file *filep) 2409 { 2410 struct pipe_crc_info *info = inode->i_private; 2411 struct drm_i915_private *dev_priv = info->dev->dev_private; 2412 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2413 2414 spin_lock_irq(&pipe_crc->lock); 2415 pipe_crc->opened = false; 2416 spin_unlock_irq(&pipe_crc->lock); 2417 2418 return 0; 2419 } 2420 2421 /* (6 fields, 8 chars each, space separated (5) + '\n') */ 2422 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) 2423 /* account for \'0' */ 2424 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) 2425 2426 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) 2427 { 2428 assert_spin_locked(&pipe_crc->lock); 2429 return CIRC_CNT(pipe_crc->head, pipe_crc->tail, 2430 INTEL_PIPE_CRC_ENTRIES_NR); 2431 } 2432 2433 static ssize_t 2434 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, 2435 loff_t *pos) 2436 { 2437 struct pipe_crc_info *info = filep->private_data; 2438 struct drm_device *dev = info->dev; 2439 struct drm_i915_private *dev_priv = dev->dev_private; 2440 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2441 char buf[PIPE_CRC_BUFFER_LEN]; 2442 int head, tail, n_entries, n; 2443 ssize_t bytes_read; 2444 2445 /* 2446 * Don't allow user space to provide buffers not big enough to hold 2447 * a line of data. 2448 */ 2449 if (count < PIPE_CRC_LINE_LEN) 2450 return -EINVAL; 2451 2452 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) 2453 return 0; 2454 2455 /* nothing to read */ 2456 spin_lock_irq(&pipe_crc->lock); 2457 while (pipe_crc_data_count(pipe_crc) == 0) { 2458 int ret; 2459 2460 if (filep->f_flags & O_NONBLOCK) { 2461 spin_unlock_irq(&pipe_crc->lock); 2462 return -EAGAIN; 2463 } 2464 2465 ret = wait_event_interruptible_lock_irq(pipe_crc->wq, 2466 pipe_crc_data_count(pipe_crc), pipe_crc->lock); 2467 if (ret) { 2468 spin_unlock_irq(&pipe_crc->lock); 2469 return ret; 2470 } 2471 } 2472 2473 /* We now have one or more entries to read */ 2474 head = pipe_crc->head; 2475 tail = pipe_crc->tail; 2476 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR), 2477 count / PIPE_CRC_LINE_LEN); 2478 spin_unlock_irq(&pipe_crc->lock); 2479 2480 bytes_read = 0; 2481 n = 0; 2482 do { 2483 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail]; 2484 int ret; 2485 2486 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, 2487 "%8u %8x %8x %8x %8x %8x\n", 2488 entry->frame, entry->crc[0], 2489 entry->crc[1], entry->crc[2], 2490 entry->crc[3], entry->crc[4]); 2491 2492 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN, 2493 buf, PIPE_CRC_LINE_LEN); 2494 if (ret == PIPE_CRC_LINE_LEN) 2495 return -EFAULT; 2496 2497 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); 2498 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 2499 n++; 2500 } while (--n_entries); 2501 2502 spin_lock_irq(&pipe_crc->lock); 2503 pipe_crc->tail = tail; 2504 spin_unlock_irq(&pipe_crc->lock); 2505 2506 return bytes_read; 2507 } 2508 2509 static const struct file_operations i915_pipe_crc_fops = { 2510 .owner = THIS_MODULE, 2511 .open = i915_pipe_crc_open, 2512 .read = i915_pipe_crc_read, 2513 .release = i915_pipe_crc_release, 2514 }; 2515 2516 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { 2517 { 2518 .name = "i915_pipe_A_crc", 2519 .pipe = PIPE_A, 2520 }, 2521 { 2522 .name = "i915_pipe_B_crc", 2523 .pipe = PIPE_B, 2524 }, 2525 { 2526 .name = "i915_pipe_C_crc", 2527 .pipe = PIPE_C, 2528 }, 2529 }; 2530 2531 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, 2532 enum pipe pipe) 2533 { 2534 struct drm_device *dev = minor->dev; 2535 struct dentry *ent; 2536 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; 2537 2538 info->dev = dev; 2539 ent = debugfs_create_file(info->name, S_IRUGO, root, info, 2540 &i915_pipe_crc_fops); 2541 if (!ent) 2542 return -ENOMEM; 2543 2544 return drm_add_fake_info_node(minor, ent, info); 2545 } 2546 2547 static const char * const pipe_crc_sources[] = { 2548 "none", 2549 "plane1", 2550 "plane2", 2551 "pf", 2552 "pipe", 2553 "TV", 2554 "DP-B", 2555 "DP-C", 2556 "DP-D", 2557 "auto", 2558 }; 2559 2560 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) 2561 { 2562 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); 2563 return pipe_crc_sources[source]; 2564 } 2565 2566 static int display_crc_ctl_show(struct seq_file *m, void *data) 2567 { 2568 struct drm_device *dev = m->private; 2569 struct drm_i915_private *dev_priv = dev->dev_private; 2570 int i; 2571 2572 for (i = 0; i < I915_MAX_PIPES; i++) 2573 seq_printf(m, "%c %s\n", pipe_name(i), 2574 pipe_crc_source_name(dev_priv->pipe_crc[i].source)); 2575 2576 return 0; 2577 } 2578 2579 static int display_crc_ctl_open(struct inode *inode, struct file *file) 2580 { 2581 struct drm_device *dev = inode->i_private; 2582 2583 return single_open(file, display_crc_ctl_show, dev); 2584 } 2585 2586 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2587 uint32_t *val) 2588 { 2589 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2590 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2591 2592 switch (*source) { 2593 case INTEL_PIPE_CRC_SOURCE_PIPE: 2594 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; 2595 break; 2596 case INTEL_PIPE_CRC_SOURCE_NONE: 2597 *val = 0; 2598 break; 2599 default: 2600 return -EINVAL; 2601 } 2602 2603 return 0; 2604 } 2605 2606 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, 2607 enum intel_pipe_crc_source *source) 2608 { 2609 struct intel_encoder *encoder; 2610 struct intel_crtc *crtc; 2611 struct intel_digital_port *dig_port; 2612 int ret = 0; 2613 2614 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2615 2616 drm_modeset_lock_all(dev); 2617 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 2618 base.head) { 2619 if (!encoder->base.crtc) 2620 continue; 2621 2622 crtc = to_intel_crtc(encoder->base.crtc); 2623 2624 if (crtc->pipe != pipe) 2625 continue; 2626 2627 switch (encoder->type) { 2628 case INTEL_OUTPUT_TVOUT: 2629 *source = INTEL_PIPE_CRC_SOURCE_TV; 2630 break; 2631 case INTEL_OUTPUT_DISPLAYPORT: 2632 case INTEL_OUTPUT_EDP: 2633 dig_port = enc_to_dig_port(&encoder->base); 2634 switch (dig_port->port) { 2635 case PORT_B: 2636 *source = INTEL_PIPE_CRC_SOURCE_DP_B; 2637 break; 2638 case PORT_C: 2639 *source = INTEL_PIPE_CRC_SOURCE_DP_C; 2640 break; 2641 case PORT_D: 2642 *source = INTEL_PIPE_CRC_SOURCE_DP_D; 2643 break; 2644 default: 2645 WARN(1, "nonexisting DP port %c\n", 2646 port_name(dig_port->port)); 2647 break; 2648 } 2649 break; 2650 } 2651 } 2652 drm_modeset_unlock_all(dev); 2653 2654 return ret; 2655 } 2656 2657 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, 2658 enum pipe pipe, 2659 enum intel_pipe_crc_source *source, 2660 uint32_t *val) 2661 { 2662 struct drm_i915_private *dev_priv = dev->dev_private; 2663 bool need_stable_symbols = false; 2664 2665 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 2666 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 2667 if (ret) 2668 return ret; 2669 } 2670 2671 switch (*source) { 2672 case INTEL_PIPE_CRC_SOURCE_PIPE: 2673 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; 2674 break; 2675 case INTEL_PIPE_CRC_SOURCE_DP_B: 2676 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; 2677 need_stable_symbols = true; 2678 break; 2679 case INTEL_PIPE_CRC_SOURCE_DP_C: 2680 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; 2681 need_stable_symbols = true; 2682 break; 2683 case INTEL_PIPE_CRC_SOURCE_NONE: 2684 *val = 0; 2685 break; 2686 default: 2687 return -EINVAL; 2688 } 2689 2690 /* 2691 * When the pipe CRC tap point is after the transcoders we need 2692 * to tweak symbol-level features to produce a deterministic series of 2693 * symbols for a given frame. We need to reset those features only once 2694 * a frame (instead of every nth symbol): 2695 * - DC-balance: used to ensure a better clock recovery from the data 2696 * link (SDVO) 2697 * - DisplayPort scrambling: used for EMI reduction 2698 */ 2699 if (need_stable_symbols) { 2700 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2701 2702 tmp |= DC_BALANCE_RESET_VLV; 2703 if (pipe == PIPE_A) 2704 tmp |= PIPE_A_SCRAMBLE_RESET; 2705 else 2706 tmp |= PIPE_B_SCRAMBLE_RESET; 2707 2708 I915_WRITE(PORT_DFT2_G4X, tmp); 2709 } 2710 2711 return 0; 2712 } 2713 2714 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, 2715 enum pipe pipe, 2716 enum intel_pipe_crc_source *source, 2717 uint32_t *val) 2718 { 2719 struct drm_i915_private *dev_priv = dev->dev_private; 2720 bool need_stable_symbols = false; 2721 2722 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 2723 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 2724 if (ret) 2725 return ret; 2726 } 2727 2728 switch (*source) { 2729 case INTEL_PIPE_CRC_SOURCE_PIPE: 2730 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; 2731 break; 2732 case INTEL_PIPE_CRC_SOURCE_TV: 2733 if (!SUPPORTS_TV(dev)) 2734 return -EINVAL; 2735 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; 2736 break; 2737 case INTEL_PIPE_CRC_SOURCE_DP_B: 2738 if (!IS_G4X(dev)) 2739 return -EINVAL; 2740 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; 2741 need_stable_symbols = true; 2742 break; 2743 case INTEL_PIPE_CRC_SOURCE_DP_C: 2744 if (!IS_G4X(dev)) 2745 return -EINVAL; 2746 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; 2747 need_stable_symbols = true; 2748 break; 2749 case INTEL_PIPE_CRC_SOURCE_DP_D: 2750 if (!IS_G4X(dev)) 2751 return -EINVAL; 2752 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; 2753 need_stable_symbols = true; 2754 break; 2755 case INTEL_PIPE_CRC_SOURCE_NONE: 2756 *val = 0; 2757 break; 2758 default: 2759 return -EINVAL; 2760 } 2761 2762 /* 2763 * When the pipe CRC tap point is after the transcoders we need 2764 * to tweak symbol-level features to produce a deterministic series of 2765 * symbols for a given frame. We need to reset those features only once 2766 * a frame (instead of every nth symbol): 2767 * - DC-balance: used to ensure a better clock recovery from the data 2768 * link (SDVO) 2769 * - DisplayPort scrambling: used for EMI reduction 2770 */ 2771 if (need_stable_symbols) { 2772 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2773 2774 WARN_ON(!IS_G4X(dev)); 2775 2776 I915_WRITE(PORT_DFT_I9XX, 2777 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); 2778 2779 if (pipe == PIPE_A) 2780 tmp |= PIPE_A_SCRAMBLE_RESET; 2781 else 2782 tmp |= PIPE_B_SCRAMBLE_RESET; 2783 2784 I915_WRITE(PORT_DFT2_G4X, tmp); 2785 } 2786 2787 return 0; 2788 } 2789 2790 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, 2791 enum pipe pipe) 2792 { 2793 struct drm_i915_private *dev_priv = dev->dev_private; 2794 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2795 2796 if (pipe == PIPE_A) 2797 tmp &= ~PIPE_A_SCRAMBLE_RESET; 2798 else 2799 tmp &= ~PIPE_B_SCRAMBLE_RESET; 2800 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) 2801 tmp &= ~DC_BALANCE_RESET_VLV; 2802 I915_WRITE(PORT_DFT2_G4X, tmp); 2803 2804 } 2805 2806 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, 2807 enum pipe pipe) 2808 { 2809 struct drm_i915_private *dev_priv = dev->dev_private; 2810 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2811 2812 if (pipe == PIPE_A) 2813 tmp &= ~PIPE_A_SCRAMBLE_RESET; 2814 else 2815 tmp &= ~PIPE_B_SCRAMBLE_RESET; 2816 I915_WRITE(PORT_DFT2_G4X, tmp); 2817 2818 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { 2819 I915_WRITE(PORT_DFT_I9XX, 2820 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); 2821 } 2822 } 2823 2824 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2825 uint32_t *val) 2826 { 2827 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2828 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2829 2830 switch (*source) { 2831 case INTEL_PIPE_CRC_SOURCE_PLANE1: 2832 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; 2833 break; 2834 case INTEL_PIPE_CRC_SOURCE_PLANE2: 2835 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; 2836 break; 2837 case INTEL_PIPE_CRC_SOURCE_PIPE: 2838 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; 2839 break; 2840 case INTEL_PIPE_CRC_SOURCE_NONE: 2841 *val = 0; 2842 break; 2843 default: 2844 return -EINVAL; 2845 } 2846 2847 return 0; 2848 } 2849 2850 static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2851 uint32_t *val) 2852 { 2853 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2854 *source = INTEL_PIPE_CRC_SOURCE_PF; 2855 2856 switch (*source) { 2857 case INTEL_PIPE_CRC_SOURCE_PLANE1: 2858 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; 2859 break; 2860 case INTEL_PIPE_CRC_SOURCE_PLANE2: 2861 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; 2862 break; 2863 case INTEL_PIPE_CRC_SOURCE_PF: 2864 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; 2865 break; 2866 case INTEL_PIPE_CRC_SOURCE_NONE: 2867 *val = 0; 2868 break; 2869 default: 2870 return -EINVAL; 2871 } 2872 2873 return 0; 2874 } 2875 2876 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, 2877 enum intel_pipe_crc_source source) 2878 { 2879 struct drm_i915_private *dev_priv = dev->dev_private; 2880 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 2881 u32 val = 0; /* shut up gcc */ 2882 int ret; 2883 2884 if (pipe_crc->source == source) 2885 return 0; 2886 2887 /* forbid changing the source without going back to 'none' */ 2888 if (pipe_crc->source && source) 2889 return -EINVAL; 2890 2891 if (IS_GEN2(dev)) 2892 ret = i8xx_pipe_crc_ctl_reg(&source, &val); 2893 else if (INTEL_INFO(dev)->gen < 5) 2894 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); 2895 else if (IS_VALLEYVIEW(dev)) 2896 ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val); 2897 else if (IS_GEN5(dev) || IS_GEN6(dev)) 2898 ret = ilk_pipe_crc_ctl_reg(&source, &val); 2899 else 2900 ret = ivb_pipe_crc_ctl_reg(&source, &val); 2901 2902 if (ret != 0) 2903 return ret; 2904 2905 /* none -> real source transition */ 2906 if (source) { 2907 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", 2908 pipe_name(pipe), pipe_crc_source_name(source)); 2909 2910 pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) * 2911 INTEL_PIPE_CRC_ENTRIES_NR, 2912 GFP_KERNEL); 2913 if (!pipe_crc->entries) 2914 return -ENOMEM; 2915 2916 spin_lock_irq(&pipe_crc->lock); 2917 pipe_crc->head = 0; 2918 pipe_crc->tail = 0; 2919 spin_unlock_irq(&pipe_crc->lock); 2920 } 2921 2922 pipe_crc->source = source; 2923 2924 I915_WRITE(PIPE_CRC_CTL(pipe), val); 2925 POSTING_READ(PIPE_CRC_CTL(pipe)); 2926 2927 /* real source -> none transition */ 2928 if (source == INTEL_PIPE_CRC_SOURCE_NONE) { 2929 struct intel_pipe_crc_entry *entries; 2930 2931 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", 2932 pipe_name(pipe)); 2933 2934 intel_wait_for_vblank(dev, pipe); 2935 2936 spin_lock_irq(&pipe_crc->lock); 2937 entries = pipe_crc->entries; 2938 pipe_crc->entries = NULL; 2939 spin_unlock_irq(&pipe_crc->lock); 2940 2941 kfree(entries); 2942 2943 if (IS_G4X(dev)) 2944 g4x_undo_pipe_scramble_reset(dev, pipe); 2945 else if (IS_VALLEYVIEW(dev)) 2946 vlv_undo_pipe_scramble_reset(dev, pipe); 2947 } 2948 2949 return 0; 2950 } 2951 2952 /* 2953 * Parse pipe CRC command strings: 2954 * command: wsp* object wsp+ name wsp+ source wsp* 2955 * object: 'pipe' 2956 * name: (A | B | C) 2957 * source: (none | plane1 | plane2 | pf) 2958 * wsp: (#0x20 | #0x9 | #0xA)+ 2959 * 2960 * eg.: 2961 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A 2962 * "pipe A none" -> Stop CRC 2963 */ 2964 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) 2965 { 2966 int n_words = 0; 2967 2968 while (*buf) { 2969 char *end; 2970 2971 /* skip leading white space */ 2972 buf = skip_spaces(buf); 2973 if (!*buf) 2974 break; /* end of buffer */ 2975 2976 /* find end of word */ 2977 for (end = buf; *end && !isspace(*end); end++) 2978 ; 2979 2980 if (n_words == max_words) { 2981 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", 2982 max_words); 2983 return -EINVAL; /* ran out of words[] before bytes */ 2984 } 2985 2986 if (*end) 2987 *end++ = '\0'; 2988 words[n_words++] = buf; 2989 buf = end; 2990 } 2991 2992 return n_words; 2993 } 2994 2995 enum intel_pipe_crc_object { 2996 PIPE_CRC_OBJECT_PIPE, 2997 }; 2998 2999 static const char * const pipe_crc_objects[] = { 3000 "pipe", 3001 }; 3002 3003 static int 3004 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) 3005 { 3006 int i; 3007 3008 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) 3009 if (!strcmp(buf, pipe_crc_objects[i])) { 3010 *o = i; 3011 return 0; 3012 } 3013 3014 return -EINVAL; 3015 } 3016 3017 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) 3018 { 3019 const char name = buf[0]; 3020 3021 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) 3022 return -EINVAL; 3023 3024 *pipe = name - 'A'; 3025 3026 return 0; 3027 } 3028 3029 static int 3030 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) 3031 { 3032 int i; 3033 3034 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) 3035 if (!strcmp(buf, pipe_crc_sources[i])) { 3036 *s = i; 3037 return 0; 3038 } 3039 3040 return -EINVAL; 3041 } 3042 3043 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len) 3044 { 3045 #define N_WORDS 3 3046 int n_words; 3047 char *words[N_WORDS]; 3048 enum pipe pipe; 3049 enum intel_pipe_crc_object object; 3050 enum intel_pipe_crc_source source; 3051 3052 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); 3053 if (n_words != N_WORDS) { 3054 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", 3055 N_WORDS); 3056 return -EINVAL; 3057 } 3058 3059 if (display_crc_ctl_parse_object(words[0], &object) < 0) { 3060 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); 3061 return -EINVAL; 3062 } 3063 3064 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { 3065 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); 3066 return -EINVAL; 3067 } 3068 3069 if (display_crc_ctl_parse_source(words[2], &source) < 0) { 3070 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); 3071 return -EINVAL; 3072 } 3073 3074 return pipe_crc_set_source(dev, pipe, source); 3075 } 3076 3077 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, 3078 size_t len, loff_t *offp) 3079 { 3080 struct seq_file *m = file->private_data; 3081 struct drm_device *dev = m->private; 3082 char *tmpbuf; 3083 int ret; 3084 3085 if (len == 0) 3086 return 0; 3087 3088 if (len > PAGE_SIZE - 1) { 3089 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", 3090 PAGE_SIZE); 3091 return -E2BIG; 3092 } 3093 3094 tmpbuf = kmalloc(len + 1, GFP_KERNEL); 3095 if (!tmpbuf) 3096 return -ENOMEM; 3097 3098 if (copy_from_user(tmpbuf, ubuf, len)) { 3099 ret = -EFAULT; 3100 goto out; 3101 } 3102 tmpbuf[len] = '\0'; 3103 3104 ret = display_crc_ctl_parse(dev, tmpbuf, len); 3105 3106 out: 3107 kfree(tmpbuf); 3108 if (ret < 0) 3109 return ret; 3110 3111 *offp += len; 3112 return len; 3113 } 3114 3115 static const struct file_operations i915_display_crc_ctl_fops = { 3116 .owner = THIS_MODULE, 3117 .open = display_crc_ctl_open, 3118 .read = seq_read, 3119 .llseek = seq_lseek, 3120 .release = single_release, 3121 .write = display_crc_ctl_write 3122 }; 3123 3124 static void wm_latency_show(struct seq_file *m, const uint16_t wm[5]) 3125 { 3126 struct drm_device *dev = m->private; 3127 int num_levels = ilk_wm_max_level(dev) + 1; 3128 int level; 3129 3130 drm_modeset_lock_all(dev); 3131 3132 for (level = 0; level < num_levels; level++) { 3133 unsigned int latency = wm[level]; 3134 3135 /* WM1+ latency values in 0.5us units */ 3136 if (level > 0) 3137 latency *= 5; 3138 3139 seq_printf(m, "WM%d %u (%u.%u usec)\n", 3140 level, wm[level], 3141 latency / 10, latency % 10); 3142 } 3143 3144 drm_modeset_unlock_all(dev); 3145 } 3146 3147 static int pri_wm_latency_show(struct seq_file *m, void *data) 3148 { 3149 struct drm_device *dev = m->private; 3150 3151 wm_latency_show(m, to_i915(dev)->wm.pri_latency); 3152 3153 return 0; 3154 } 3155 3156 static int spr_wm_latency_show(struct seq_file *m, void *data) 3157 { 3158 struct drm_device *dev = m->private; 3159 3160 wm_latency_show(m, to_i915(dev)->wm.spr_latency); 3161 3162 return 0; 3163 } 3164 3165 static int cur_wm_latency_show(struct seq_file *m, void *data) 3166 { 3167 struct drm_device *dev = m->private; 3168 3169 wm_latency_show(m, to_i915(dev)->wm.cur_latency); 3170 3171 return 0; 3172 } 3173 3174 static int pri_wm_latency_open(struct inode *inode, struct file *file) 3175 { 3176 struct drm_device *dev = inode->i_private; 3177 3178 if (!HAS_PCH_SPLIT(dev)) 3179 return -ENODEV; 3180 3181 return single_open(file, pri_wm_latency_show, dev); 3182 } 3183 3184 static int spr_wm_latency_open(struct inode *inode, struct file *file) 3185 { 3186 struct drm_device *dev = inode->i_private; 3187 3188 if (!HAS_PCH_SPLIT(dev)) 3189 return -ENODEV; 3190 3191 return single_open(file, spr_wm_latency_show, dev); 3192 } 3193 3194 static int cur_wm_latency_open(struct inode *inode, struct file *file) 3195 { 3196 struct drm_device *dev = inode->i_private; 3197 3198 if (!HAS_PCH_SPLIT(dev)) 3199 return -ENODEV; 3200 3201 return single_open(file, cur_wm_latency_show, dev); 3202 } 3203 3204 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 3205 size_t len, loff_t *offp, uint16_t wm[5]) 3206 { 3207 struct seq_file *m = file->private_data; 3208 struct drm_device *dev = m->private; 3209 uint16_t new[5] = { 0 }; 3210 int num_levels = ilk_wm_max_level(dev) + 1; 3211 int level; 3212 int ret; 3213 char tmp[32]; 3214 3215 if (len >= sizeof(tmp)) 3216 return -EINVAL; 3217 3218 if (copy_from_user(tmp, ubuf, len)) 3219 return -EFAULT; 3220 3221 tmp[len] = '\0'; 3222 3223 ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]); 3224 if (ret != num_levels) 3225 return -EINVAL; 3226 3227 drm_modeset_lock_all(dev); 3228 3229 for (level = 0; level < num_levels; level++) 3230 wm[level] = new[level]; 3231 3232 drm_modeset_unlock_all(dev); 3233 3234 return len; 3235 } 3236 3237 3238 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 3239 size_t len, loff_t *offp) 3240 { 3241 struct seq_file *m = file->private_data; 3242 struct drm_device *dev = m->private; 3243 3244 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency); 3245 } 3246 3247 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 3248 size_t len, loff_t *offp) 3249 { 3250 struct seq_file *m = file->private_data; 3251 struct drm_device *dev = m->private; 3252 3253 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency); 3254 } 3255 3256 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 3257 size_t len, loff_t *offp) 3258 { 3259 struct seq_file *m = file->private_data; 3260 struct drm_device *dev = m->private; 3261 3262 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency); 3263 } 3264 3265 static const struct file_operations i915_pri_wm_latency_fops = { 3266 .owner = THIS_MODULE, 3267 .open = pri_wm_latency_open, 3268 .read = seq_read, 3269 .llseek = seq_lseek, 3270 .release = single_release, 3271 .write = pri_wm_latency_write 3272 }; 3273 3274 static const struct file_operations i915_spr_wm_latency_fops = { 3275 .owner = THIS_MODULE, 3276 .open = spr_wm_latency_open, 3277 .read = seq_read, 3278 .llseek = seq_lseek, 3279 .release = single_release, 3280 .write = spr_wm_latency_write 3281 }; 3282 3283 static const struct file_operations i915_cur_wm_latency_fops = { 3284 .owner = THIS_MODULE, 3285 .open = cur_wm_latency_open, 3286 .read = seq_read, 3287 .llseek = seq_lseek, 3288 .release = single_release, 3289 .write = cur_wm_latency_write 3290 }; 3291 3292 static int 3293 i915_wedged_get(void *data, u64 *val) 3294 { 3295 struct drm_device *dev = data; 3296 struct drm_i915_private *dev_priv = dev->dev_private; 3297 3298 *val = atomic_read(&dev_priv->gpu_error.reset_counter); 3299 3300 return 0; 3301 } 3302 3303 static int 3304 i915_wedged_set(void *data, u64 val) 3305 { 3306 struct drm_device *dev = data; 3307 struct drm_i915_private *dev_priv = dev->dev_private; 3308 3309 intel_runtime_pm_get(dev_priv); 3310 3311 i915_handle_error(dev, val, 3312 "Manually setting wedged to %llu", val); 3313 3314 intel_runtime_pm_put(dev_priv); 3315 3316 return 0; 3317 } 3318 3319 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 3320 i915_wedged_get, i915_wedged_set, 3321 "%llu\n"); 3322 3323 static int 3324 i915_ring_stop_get(void *data, u64 *val) 3325 { 3326 struct drm_device *dev = data; 3327 struct drm_i915_private *dev_priv = dev->dev_private; 3328 3329 *val = dev_priv->gpu_error.stop_rings; 3330 3331 return 0; 3332 } 3333 3334 static int 3335 i915_ring_stop_set(void *data, u64 val) 3336 { 3337 struct drm_device *dev = data; 3338 struct drm_i915_private *dev_priv = dev->dev_private; 3339 int ret; 3340 3341 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 3342 3343 ret = mutex_lock_interruptible(&dev->struct_mutex); 3344 if (ret) 3345 return ret; 3346 3347 dev_priv->gpu_error.stop_rings = val; 3348 mutex_unlock(&dev->struct_mutex); 3349 3350 return 0; 3351 } 3352 3353 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 3354 i915_ring_stop_get, i915_ring_stop_set, 3355 "0x%08llx\n"); 3356 3357 static int 3358 i915_ring_missed_irq_get(void *data, u64 *val) 3359 { 3360 struct drm_device *dev = data; 3361 struct drm_i915_private *dev_priv = dev->dev_private; 3362 3363 *val = dev_priv->gpu_error.missed_irq_rings; 3364 return 0; 3365 } 3366 3367 static int 3368 i915_ring_missed_irq_set(void *data, u64 val) 3369 { 3370 struct drm_device *dev = data; 3371 struct drm_i915_private *dev_priv = dev->dev_private; 3372 int ret; 3373 3374 /* Lock against concurrent debugfs callers */ 3375 ret = mutex_lock_interruptible(&dev->struct_mutex); 3376 if (ret) 3377 return ret; 3378 dev_priv->gpu_error.missed_irq_rings = val; 3379 mutex_unlock(&dev->struct_mutex); 3380 3381 return 0; 3382 } 3383 3384 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 3385 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 3386 "0x%08llx\n"); 3387 3388 static int 3389 i915_ring_test_irq_get(void *data, u64 *val) 3390 { 3391 struct drm_device *dev = data; 3392 struct drm_i915_private *dev_priv = dev->dev_private; 3393 3394 *val = dev_priv->gpu_error.test_irq_rings; 3395 3396 return 0; 3397 } 3398 3399 static int 3400 i915_ring_test_irq_set(void *data, u64 val) 3401 { 3402 struct drm_device *dev = data; 3403 struct drm_i915_private *dev_priv = dev->dev_private; 3404 int ret; 3405 3406 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 3407 3408 /* Lock against concurrent debugfs callers */ 3409 ret = mutex_lock_interruptible(&dev->struct_mutex); 3410 if (ret) 3411 return ret; 3412 3413 dev_priv->gpu_error.test_irq_rings = val; 3414 mutex_unlock(&dev->struct_mutex); 3415 3416 return 0; 3417 } 3418 3419 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 3420 i915_ring_test_irq_get, i915_ring_test_irq_set, 3421 "0x%08llx\n"); 3422 3423 #define DROP_UNBOUND 0x1 3424 #define DROP_BOUND 0x2 3425 #define DROP_RETIRE 0x4 3426 #define DROP_ACTIVE 0x8 3427 #define DROP_ALL (DROP_UNBOUND | \ 3428 DROP_BOUND | \ 3429 DROP_RETIRE | \ 3430 DROP_ACTIVE) 3431 static int 3432 i915_drop_caches_get(void *data, u64 *val) 3433 { 3434 *val = DROP_ALL; 3435 3436 return 0; 3437 } 3438 3439 static int 3440 i915_drop_caches_set(void *data, u64 val) 3441 { 3442 struct drm_device *dev = data; 3443 struct drm_i915_private *dev_priv = dev->dev_private; 3444 struct drm_i915_gem_object *obj, *next; 3445 struct i915_address_space *vm; 3446 struct i915_vma *vma, *x; 3447 int ret; 3448 3449 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 3450 3451 /* No need to check and wait for gpu resets, only libdrm auto-restarts 3452 * on ioctls on -EAGAIN. */ 3453 ret = mutex_lock_interruptible(&dev->struct_mutex); 3454 if (ret) 3455 return ret; 3456 3457 if (val & DROP_ACTIVE) { 3458 ret = i915_gpu_idle(dev); 3459 if (ret) 3460 goto unlock; 3461 } 3462 3463 if (val & (DROP_RETIRE | DROP_ACTIVE)) 3464 i915_gem_retire_requests(dev); 3465 3466 if (val & DROP_BOUND) { 3467 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 3468 list_for_each_entry_safe(vma, x, &vm->inactive_list, 3469 mm_list) { 3470 if (vma->pin_count) 3471 continue; 3472 3473 ret = i915_vma_unbind(vma); 3474 if (ret) 3475 goto unlock; 3476 } 3477 } 3478 } 3479 3480 if (val & DROP_UNBOUND) { 3481 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, 3482 global_list) 3483 if (obj->pages_pin_count == 0) { 3484 ret = i915_gem_object_put_pages(obj); 3485 if (ret) 3486 goto unlock; 3487 } 3488 } 3489 3490 unlock: 3491 mutex_unlock(&dev->struct_mutex); 3492 3493 return ret; 3494 } 3495 3496 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 3497 i915_drop_caches_get, i915_drop_caches_set, 3498 "0x%08llx\n"); 3499 3500 static int 3501 i915_max_freq_get(void *data, u64 *val) 3502 { 3503 struct drm_device *dev = data; 3504 struct drm_i915_private *dev_priv = dev->dev_private; 3505 int ret; 3506 3507 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3508 return -ENODEV; 3509 3510 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3511 3512 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3513 if (ret) 3514 return ret; 3515 3516 if (IS_VALLEYVIEW(dev)) 3517 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); 3518 else 3519 *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER; 3520 mutex_unlock(&dev_priv->rps.hw_lock); 3521 3522 return 0; 3523 } 3524 3525 static int 3526 i915_max_freq_set(void *data, u64 val) 3527 { 3528 struct drm_device *dev = data; 3529 struct drm_i915_private *dev_priv = dev->dev_private; 3530 u32 rp_state_cap, hw_max, hw_min; 3531 int ret; 3532 3533 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3534 return -ENODEV; 3535 3536 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3537 3538 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 3539 3540 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3541 if (ret) 3542 return ret; 3543 3544 /* 3545 * Turbo will still be enabled, but won't go above the set value. 3546 */ 3547 if (IS_VALLEYVIEW(dev)) { 3548 val = vlv_freq_opcode(dev_priv, val); 3549 3550 hw_max = valleyview_rps_max_freq(dev_priv); 3551 hw_min = valleyview_rps_min_freq(dev_priv); 3552 } else { 3553 do_div(val, GT_FREQUENCY_MULTIPLIER); 3554 3555 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 3556 hw_max = dev_priv->rps.max_freq; 3557 hw_min = (rp_state_cap >> 16) & 0xff; 3558 } 3559 3560 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { 3561 mutex_unlock(&dev_priv->rps.hw_lock); 3562 return -EINVAL; 3563 } 3564 3565 dev_priv->rps.max_freq_softlimit = val; 3566 3567 if (IS_VALLEYVIEW(dev)) 3568 valleyview_set_rps(dev, val); 3569 else 3570 gen6_set_rps(dev, val); 3571 3572 mutex_unlock(&dev_priv->rps.hw_lock); 3573 3574 return 0; 3575 } 3576 3577 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 3578 i915_max_freq_get, i915_max_freq_set, 3579 "%llu\n"); 3580 3581 static int 3582 i915_min_freq_get(void *data, u64 *val) 3583 { 3584 struct drm_device *dev = data; 3585 struct drm_i915_private *dev_priv = dev->dev_private; 3586 int ret; 3587 3588 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3589 return -ENODEV; 3590 3591 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3592 3593 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3594 if (ret) 3595 return ret; 3596 3597 if (IS_VALLEYVIEW(dev)) 3598 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); 3599 else 3600 *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER; 3601 mutex_unlock(&dev_priv->rps.hw_lock); 3602 3603 return 0; 3604 } 3605 3606 static int 3607 i915_min_freq_set(void *data, u64 val) 3608 { 3609 struct drm_device *dev = data; 3610 struct drm_i915_private *dev_priv = dev->dev_private; 3611 u32 rp_state_cap, hw_max, hw_min; 3612 int ret; 3613 3614 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3615 return -ENODEV; 3616 3617 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3618 3619 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 3620 3621 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3622 if (ret) 3623 return ret; 3624 3625 /* 3626 * Turbo will still be enabled, but won't go below the set value. 3627 */ 3628 if (IS_VALLEYVIEW(dev)) { 3629 val = vlv_freq_opcode(dev_priv, val); 3630 3631 hw_max = valleyview_rps_max_freq(dev_priv); 3632 hw_min = valleyview_rps_min_freq(dev_priv); 3633 } else { 3634 do_div(val, GT_FREQUENCY_MULTIPLIER); 3635 3636 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 3637 hw_max = dev_priv->rps.max_freq; 3638 hw_min = (rp_state_cap >> 16) & 0xff; 3639 } 3640 3641 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { 3642 mutex_unlock(&dev_priv->rps.hw_lock); 3643 return -EINVAL; 3644 } 3645 3646 dev_priv->rps.min_freq_softlimit = val; 3647 3648 if (IS_VALLEYVIEW(dev)) 3649 valleyview_set_rps(dev, val); 3650 else 3651 gen6_set_rps(dev, val); 3652 3653 mutex_unlock(&dev_priv->rps.hw_lock); 3654 3655 return 0; 3656 } 3657 3658 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 3659 i915_min_freq_get, i915_min_freq_set, 3660 "%llu\n"); 3661 3662 static int 3663 i915_cache_sharing_get(void *data, u64 *val) 3664 { 3665 struct drm_device *dev = data; 3666 struct drm_i915_private *dev_priv = dev->dev_private; 3667 u32 snpcr; 3668 int ret; 3669 3670 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3671 return -ENODEV; 3672 3673 ret = mutex_lock_interruptible(&dev->struct_mutex); 3674 if (ret) 3675 return ret; 3676 intel_runtime_pm_get(dev_priv); 3677 3678 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 3679 3680 intel_runtime_pm_put(dev_priv); 3681 mutex_unlock(&dev_priv->dev->struct_mutex); 3682 3683 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 3684 3685 return 0; 3686 } 3687 3688 static int 3689 i915_cache_sharing_set(void *data, u64 val) 3690 { 3691 struct drm_device *dev = data; 3692 struct drm_i915_private *dev_priv = dev->dev_private; 3693 u32 snpcr; 3694 3695 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3696 return -ENODEV; 3697 3698 if (val > 3) 3699 return -EINVAL; 3700 3701 intel_runtime_pm_get(dev_priv); 3702 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 3703 3704 /* Update the cache sharing policy here as well */ 3705 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 3706 snpcr &= ~GEN6_MBC_SNPCR_MASK; 3707 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 3708 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 3709 3710 intel_runtime_pm_put(dev_priv); 3711 return 0; 3712 } 3713 3714 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 3715 i915_cache_sharing_get, i915_cache_sharing_set, 3716 "%llu\n"); 3717 3718 static int i915_forcewake_open(struct inode *inode, struct file *file) 3719 { 3720 struct drm_device *dev = inode->i_private; 3721 struct drm_i915_private *dev_priv = dev->dev_private; 3722 3723 if (INTEL_INFO(dev)->gen < 6) 3724 return 0; 3725 3726 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 3727 3728 return 0; 3729 } 3730 3731 static int i915_forcewake_release(struct inode *inode, struct file *file) 3732 { 3733 struct drm_device *dev = inode->i_private; 3734 struct drm_i915_private *dev_priv = dev->dev_private; 3735 3736 if (INTEL_INFO(dev)->gen < 6) 3737 return 0; 3738 3739 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 3740 3741 return 0; 3742 } 3743 3744 static const struct file_operations i915_forcewake_fops = { 3745 .owner = THIS_MODULE, 3746 .open = i915_forcewake_open, 3747 .release = i915_forcewake_release, 3748 }; 3749 3750 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 3751 { 3752 struct drm_device *dev = minor->dev; 3753 struct dentry *ent; 3754 3755 ent = debugfs_create_file("i915_forcewake_user", 3756 S_IRUSR, 3757 root, dev, 3758 &i915_forcewake_fops); 3759 if (!ent) 3760 return -ENOMEM; 3761 3762 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 3763 } 3764 3765 static int i915_debugfs_create(struct dentry *root, 3766 struct drm_minor *minor, 3767 const char *name, 3768 const struct file_operations *fops) 3769 { 3770 struct drm_device *dev = minor->dev; 3771 struct dentry *ent; 3772 3773 ent = debugfs_create_file(name, 3774 S_IRUGO | S_IWUSR, 3775 root, dev, 3776 fops); 3777 if (!ent) 3778 return -ENOMEM; 3779 3780 return drm_add_fake_info_node(minor, ent, fops); 3781 } 3782 3783 static const struct drm_info_list i915_debugfs_list[] = { 3784 {"i915_capabilities", i915_capabilities, 0}, 3785 {"i915_gem_objects", i915_gem_object_info, 0}, 3786 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 3787 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 3788 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 3789 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 3790 {"i915_gem_stolen", i915_gem_stolen_list_info }, 3791 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 3792 {"i915_gem_request", i915_gem_request_info, 0}, 3793 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 3794 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 3795 {"i915_gem_interrupt", i915_interrupt_info, 0}, 3796 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 3797 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 3798 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 3799 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, 3800 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 3801 {"i915_frequency_info", i915_frequency_info, 0}, 3802 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 3803 {"i915_inttoext_table", i915_inttoext_table, 0}, 3804 {"i915_drpc_info", i915_drpc_info, 0}, 3805 {"i915_emon_status", i915_emon_status, 0}, 3806 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 3807 {"i915_gfxec", i915_gfxec, 0}, 3808 {"i915_fbc_status", i915_fbc_status, 0}, 3809 {"i915_ips_status", i915_ips_status, 0}, 3810 {"i915_sr_status", i915_sr_status, 0}, 3811 {"i915_opregion", i915_opregion, 0}, 3812 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 3813 {"i915_context_status", i915_context_status, 0}, 3814 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 3815 {"i915_swizzle_info", i915_swizzle_info, 0}, 3816 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 3817 {"i915_llc", i915_llc, 0}, 3818 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 3819 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 3820 {"i915_energy_uJ", i915_energy_uJ, 0}, 3821 {"i915_pc8_status", i915_pc8_status, 0}, 3822 {"i915_power_domain_info", i915_power_domain_info, 0}, 3823 {"i915_display_info", i915_display_info, 0}, 3824 }; 3825 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 3826 3827 static const struct i915_debugfs_files { 3828 const char *name; 3829 const struct file_operations *fops; 3830 } i915_debugfs_files[] = { 3831 {"i915_wedged", &i915_wedged_fops}, 3832 {"i915_max_freq", &i915_max_freq_fops}, 3833 {"i915_min_freq", &i915_min_freq_fops}, 3834 {"i915_cache_sharing", &i915_cache_sharing_fops}, 3835 {"i915_ring_stop", &i915_ring_stop_fops}, 3836 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 3837 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 3838 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 3839 {"i915_error_state", &i915_error_state_fops}, 3840 {"i915_next_seqno", &i915_next_seqno_fops}, 3841 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 3842 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 3843 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 3844 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 3845 }; 3846 3847 void intel_display_crc_init(struct drm_device *dev) 3848 { 3849 struct drm_i915_private *dev_priv = dev->dev_private; 3850 enum pipe pipe; 3851 3852 for_each_pipe(pipe) { 3853 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3854 3855 pipe_crc->opened = false; 3856 spin_lock_init(&pipe_crc->lock); 3857 init_waitqueue_head(&pipe_crc->wq); 3858 } 3859 } 3860 3861 int i915_debugfs_init(struct drm_minor *minor) 3862 { 3863 int ret, i; 3864 3865 ret = i915_forcewake_create(minor->debugfs_root, minor); 3866 if (ret) 3867 return ret; 3868 3869 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 3870 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); 3871 if (ret) 3872 return ret; 3873 } 3874 3875 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 3876 ret = i915_debugfs_create(minor->debugfs_root, minor, 3877 i915_debugfs_files[i].name, 3878 i915_debugfs_files[i].fops); 3879 if (ret) 3880 return ret; 3881 } 3882 3883 return drm_debugfs_create_files(i915_debugfs_list, 3884 I915_DEBUGFS_ENTRIES, 3885 minor->debugfs_root, minor); 3886 } 3887 3888 void i915_debugfs_cleanup(struct drm_minor *minor) 3889 { 3890 int i; 3891 3892 drm_debugfs_remove_files(i915_debugfs_list, 3893 I915_DEBUGFS_ENTRIES, minor); 3894 3895 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 3896 1, minor); 3897 3898 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 3899 struct drm_info_list *info_list = 3900 (struct drm_info_list *)&i915_pipe_crc_data[i]; 3901 3902 drm_debugfs_remove_files(info_list, 1, minor); 3903 } 3904 3905 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 3906 struct drm_info_list *info_list = 3907 (struct drm_info_list *) i915_debugfs_files[i].fops; 3908 3909 drm_debugfs_remove_files(info_list, 1, minor); 3910 } 3911 } 3912