/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
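/*
 * The i915_*_info() functions below each back one read-only file under
 * <debugfs>/dri/<minor>/ (debugfs is conventionally mounted at
 * /sys/kernel/debug).  They are invoked through the seq_file interface,
 * so each one simply formats its report into *m.
 */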
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (i915_gem_obj_is_pinned(obj))
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}

static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			pin_count++;
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
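/*
 * Legend for the flag columns printed by describe_obj() above: 'P'/'p'
 * mark user vs. driver pins, 'X'/'Y' the tiling mode, and 'g' a global
 * GTT mapping.  An illustrative (not captured) output line:
 *
 *   ffff88012ab4c000: pXg     1024KiB 40 00 23 22 0 uncached dirty (pinned x 1) (ggtt offset: 00800000, size: 00100000) (render ring)
 */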
static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
{
	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
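/*
 * The two counting helpers below are macros rather than functions so that
 * they can accumulate into whichever size/count/mappable_size/mappable_count
 * locals happen to be in scope at the call site (see i915_gem_object_info()).
 */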
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	struct drm_i915_file_private *file_priv;
	int count;
	size_t total, unbound;
	size_t global, shared;
	size_t active, inactive;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, vma_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_is_ggtt(vma->vm)) {
				stats->global += obj->base.size;
				continue;
			}

			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
				continue;

			if (obj->ring) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->ring)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)
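/*
 * per_file_stats() above is used as an idr_for_each() callback:
 * i915_gem_object_info() below walks each client's object_idr with it to
 * break the memory totals down per process.  The early returns mean an
 * object lands in at most one of the active/inactive/global buckets.
 */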
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
			   task ? task->comm : "<unknown>",
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.global,
			   stats.shared,
			   stats.unbound);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
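/*
 * i915_gem_pageflip_info() below samples crtc->unpin_work under the event
 * spinlock: a non-NULL unpin_work means a page flip is still in flight on
 * that pipe, and work->pending records how far it has progressed.
 */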
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
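/*
 * i915_interrupt_info() below dumps the interrupt register triplets.  As a
 * reminder of the hardware convention reflected in the strings: IER enables
 * an interrupt source, IMR masks it from reaching the CPU, and IIR latches
 * which sources have fired (their "identity").
 */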
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(pipe) {
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
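/*
 * Fence registers describe the tiling layout used when tiled objects are
 * accessed through the mappable GTT aperture.  The dump below shows which
 * object, if any, currently owns each of the num_fence_regs slots, along
 * with its pin count.
 */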
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}
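/*
 * Together these implement the i915_error_state debugfs file: open() takes
 * a reference on the last captured GPU error state, read() (below) formats
 * it into a buffer positioned at *pos, any write() clears the captured
 * state, and release() drops the reference taken at open.
 */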
static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	crstanddelay = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n",
		   (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}
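/*
 * i915_cur_delayinfo() below reports the RPS (render P-state) situation.
 * On gen6/gen7 the requested and current ratios come from RPNSWREQ and
 * RPSTAT1 (the latter lives in the GT power well, hence the forcewake
 * get/put pair) and are scaled by GT_FREQUENCY_MULTIPLIER to get MHz;
 * Haswell packs the fields differently, which is why the shift and mask
 * are chip-dependent.
 */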
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(dev))
			reqf >>= 24;
		else
			reqf >>= 25;
		reqf *= GT_FREQUENCY_MULTIPLIER;

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts, val;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		val = valleyview_rps_max_freq(dev_priv);
		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, val));

		val = valleyview_rps_min_freq(dev_priv);
		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, val));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
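/*
 * The three drpc_info() helpers below dump render C-state (RC6/render
 * standby) status; i915_drpc_info() further down picks the right one per
 * platform: Valleyview, gen6/gen7, or the Ironlake-style register layout.
 */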
"no" : "yes"); 1191 seq_puts(m, "Current RS state: "); 1192 switch (rstdbyctl & RSX_STATUS_MASK) { 1193 case RSX_STATUS_ON: 1194 seq_puts(m, "on\n"); 1195 break; 1196 case RSX_STATUS_RC1: 1197 seq_puts(m, "RC1\n"); 1198 break; 1199 case RSX_STATUS_RC1E: 1200 seq_puts(m, "RC1E\n"); 1201 break; 1202 case RSX_STATUS_RS1: 1203 seq_puts(m, "RS1\n"); 1204 break; 1205 case RSX_STATUS_RS2: 1206 seq_puts(m, "RS2 (RC6)\n"); 1207 break; 1208 case RSX_STATUS_RS3: 1209 seq_puts(m, "RC3 (RC6+)\n"); 1210 break; 1211 default: 1212 seq_puts(m, "unknown\n"); 1213 break; 1214 } 1215 1216 return 0; 1217 } 1218 1219 static int vlv_drpc_info(struct seq_file *m) 1220 { 1221 1222 struct drm_info_node *node = (struct drm_info_node *) m->private; 1223 struct drm_device *dev = node->minor->dev; 1224 struct drm_i915_private *dev_priv = dev->dev_private; 1225 u32 rpmodectl1, rcctl1; 1226 unsigned fw_rendercount = 0, fw_mediacount = 0; 1227 1228 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1229 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1230 1231 seq_printf(m, "Video Turbo Mode: %s\n", 1232 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1233 seq_printf(m, "Turbo enabled: %s\n", 1234 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1235 seq_printf(m, "HW control enabled: %s\n", 1236 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1237 seq_printf(m, "SW control enabled: %s\n", 1238 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1239 GEN6_RP_MEDIA_SW_MODE)); 1240 seq_printf(m, "RC6 Enabled: %s\n", 1241 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | 1242 GEN6_RC_CTL_EI_MODE(1)))); 1243 seq_printf(m, "Render Power Well: %s\n", 1244 (I915_READ(VLV_GTLC_PW_STATUS) & 1245 VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); 1246 seq_printf(m, "Media Power Well: %s\n", 1247 (I915_READ(VLV_GTLC_PW_STATUS) & 1248 VLV_GTLC_PW_MEDIA_STATUS_MASK) ? 
"Up" : "Down"); 1249 1250 spin_lock_irq(&dev_priv->uncore.lock); 1251 fw_rendercount = dev_priv->uncore.fw_rendercount; 1252 fw_mediacount = dev_priv->uncore.fw_mediacount; 1253 spin_unlock_irq(&dev_priv->uncore.lock); 1254 1255 seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount); 1256 seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount); 1257 1258 1259 return 0; 1260 } 1261 1262 1263 static int gen6_drpc_info(struct seq_file *m) 1264 { 1265 1266 struct drm_info_node *node = (struct drm_info_node *) m->private; 1267 struct drm_device *dev = node->minor->dev; 1268 struct drm_i915_private *dev_priv = dev->dev_private; 1269 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1270 unsigned forcewake_count; 1271 int count = 0, ret; 1272 1273 ret = mutex_lock_interruptible(&dev->struct_mutex); 1274 if (ret) 1275 return ret; 1276 intel_runtime_pm_get(dev_priv); 1277 1278 spin_lock_irq(&dev_priv->uncore.lock); 1279 forcewake_count = dev_priv->uncore.forcewake_count; 1280 spin_unlock_irq(&dev_priv->uncore.lock); 1281 1282 if (forcewake_count) { 1283 seq_puts(m, "RC information inaccurate because somebody " 1284 "holds a forcewake reference \n"); 1285 } else { 1286 /* NB: we cannot use forcewake, else we read the wrong values */ 1287 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1288 udelay(10); 1289 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1290 } 1291 1292 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1293 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1294 1295 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1296 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1297 mutex_unlock(&dev->struct_mutex); 1298 mutex_lock(&dev_priv->rps.hw_lock); 1299 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1300 mutex_unlock(&dev_priv->rps.hw_lock); 1301 1302 intel_runtime_pm_put(dev_priv); 1303 1304 seq_printf(m, "Video Turbo Mode: %s\n", 1305 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1306 seq_printf(m, "HW control enabled: %s\n", 1307 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1308 seq_printf(m, "SW control enabled: %s\n", 1309 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1310 GEN6_RP_MEDIA_SW_MODE)); 1311 seq_printf(m, "RC1e Enabled: %s\n", 1312 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1313 seq_printf(m, "RC6 Enabled: %s\n", 1314 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1315 seq_printf(m, "Deep RC6 Enabled: %s\n", 1316 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1317 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1318 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1319 seq_puts(m, "Current RC state: "); 1320 switch (gt_core_status & GEN6_RCn_MASK) { 1321 case GEN6_RC0: 1322 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1323 seq_puts(m, "Core Power Down\n"); 1324 else 1325 seq_puts(m, "on\n"); 1326 break; 1327 case GEN6_RC3: 1328 seq_puts(m, "RC3\n"); 1329 break; 1330 case GEN6_RC6: 1331 seq_puts(m, "RC6\n"); 1332 break; 1333 case GEN6_RC7: 1334 seq_puts(m, "RC7\n"); 1335 break; 1336 default: 1337 seq_puts(m, "Unknown\n"); 1338 break; 1339 } 1340 1341 seq_printf(m, "Core Power Down: %s\n", 1342 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1343 1344 /* Not exactly sure what this is */ 1345 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1346 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1347 seq_printf(m, "RC6 residency since boot: %u\n", 1348 I915_READ(GEN6_GT_GFX_RC6)); 1349 seq_printf(m, "RC6+ residency since boot: %u\n", 1350 I915_READ(GEN6_GT_GFX_RC6p)); 1351 seq_printf(m, "RC6++ residency since 
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6 voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev))
		return vlv_drpc_info(m);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
		seq_puts(m, "enabled\n");
	else
		seq_puts(m, "disabled\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
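/*
 * In i915_ring_freq_table() above, each GEN6_PCODE_READ_MIN_FREQ_TABLE
 * mailbox read is an in/out exchange: ia_freq carries the GPU ratio in and
 * comes back with the effective CPU frequency in bits 7:0 and the ring
 * frequency in bits 15:8, both in units of 100 MHz.
 */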
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_fbdev *ifbdev = NULL;
	struct intel_framebuffer *fb;

#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
	mutex_unlock(&dev->mode_config.mutex);
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (ifbdev && &fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct i915_hw_context *ctx;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i)
			if (ring->default_context == ctx)
				seq_printf(m, "(default context %s) ", ring->name);

		describe_obj(m, ctx->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
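/*
 * Valleyview tracks forcewake per power well (render vs. media), so the
 * counter dump below reports two counts there and a single global count
 * everywhere else.
 */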
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;

	spin_lock_irq(&dev_priv->uncore.lock);
	if (IS_VALLEYVIEW(dev)) {
		fw_rendercount = dev_priv->uncore.fw_rendercount;
		fw_mediacount = dev_priv->uncore.fw_mediacount;
	} else
		forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
		seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
	} else
		seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (IS_GEN8(dev))
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_hw_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);

	ppgtt->debug_dump(ppgtt, m);

	return 0;
}

static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int unused, i;

	if (!ppgtt)
		return;

	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
	for_each_ring(ring, dev_priv, unused) {
		seq_printf(m, "%s\n", ring->name);
		for (i = 0; i < 4; i++) {
			u32 offset = 0x270 + i * 8;
			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
			pdp <<= 32;
			pdp |= I915_READ(ring->mmio_base + offset);
			/* Print directly; re-looping over i here would
			 * clobber the loop counter. */
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
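/*
 * The 0x270 + i * 8 offsets read in gen8_ppgtt_info() above are the
 * per-ring PDP (page directory pointer) registers on gen8: each of the
 * four PDPs is a 64-bit value spread over two 32-bit MMIO words, read
 * high dword first.
 */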
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_file *file;
	int i;

	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);

		ppgtt->debug_dump(ppgtt, m);
	} else
		return;

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_hw_ppgtt *pvt_ppgtt;

		pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
		seq_printf(m, "proc: %s\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
		seq_puts(m, " default context:\n");
		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	int ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_info(m, dev);
	else if (INTEL_INFO(dev)->gen >= 6)
		gen6_ppgtt_info(m, dev);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
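/*
 * i915_dpio_info() below reads display PHY state over the DPIO sideband
 * bus, which only exists on Valleyview; dpio_lock serialises the indirect
 * read sequence against other sideband users.
 */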
vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1))); 1926 1927 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1928 vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0)); 1929 1930 mutex_unlock(&dev_priv->dpio_lock); 1931 1932 return 0; 1933 } 1934 1935 static int i915_llc(struct seq_file *m, void *data) 1936 { 1937 struct drm_info_node *node = (struct drm_info_node *) m->private; 1938 struct drm_device *dev = node->minor->dev; 1939 struct drm_i915_private *dev_priv = dev->dev_private; 1940 1941 /* Size calculation for LLC is a bit of a pain. Ignore for now. */ 1942 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); 1943 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); 1944 1945 return 0; 1946 } 1947 1948 static int i915_edp_psr_status(struct seq_file *m, void *data) 1949 { 1950 struct drm_info_node *node = m->private; 1951 struct drm_device *dev = node->minor->dev; 1952 struct drm_i915_private *dev_priv = dev->dev_private; 1953 u32 psrperf = 0; 1954 bool enabled = false; 1955 1956 intel_runtime_pm_get(dev_priv); 1957 1958 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 1959 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 1960 1961 enabled = HAS_PSR(dev) && 1962 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; 1963 seq_printf(m, "Enabled: %s\n", yesno(enabled)); 1964 1965 if (HAS_PSR(dev)) 1966 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & 1967 EDP_PSR_PERF_CNT_MASK; 1968 seq_printf(m, "Performance_Counter: %u\n", psrperf); 1969 1970 intel_runtime_pm_put(dev_priv); 1971 return 0; 1972 } 1973 1974 static int i915_sink_crc(struct seq_file *m, void *data) 1975 { 1976 struct drm_info_node *node = m->private; 1977 struct drm_device *dev = node->minor->dev; 1978 struct intel_encoder *encoder; 1979 struct intel_connector *connector; 1980 struct intel_dp *intel_dp = NULL; 1981 int ret; 1982 u8 crc[6]; 1983 1984 drm_modeset_lock_all(dev); 1985 list_for_each_entry(connector, &dev->mode_config.connector_list, 1986 base.head) { 1987 1988 if (connector->base.dpms != DRM_MODE_DPMS_ON) 1989 continue; 1990 1991 if (!connector->base.encoder) 1992 continue; 1993 1994 encoder = to_intel_encoder(connector->base.encoder); 1995 if (encoder->type != INTEL_OUTPUT_EDP) 1996 continue; 1997 1998 intel_dp = enc_to_intel_dp(&encoder->base); 1999 2000 ret = intel_dp_sink_crc(intel_dp, crc); 2001 if (ret) 2002 goto out; 2003 2004 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2005 crc[0], crc[1], crc[2], 2006 crc[3], crc[4], crc[5]); 2007 goto out; 2008 } 2009 ret = -ENODEV; 2010 out: 2011 drm_modeset_unlock_all(dev); 2012 return ret; 2013 } 2014 2015 static int i915_energy_uJ(struct seq_file *m, void *data) 2016 { 2017 struct drm_info_node *node = m->private; 2018 struct drm_device *dev = node->minor->dev; 2019 struct drm_i915_private *dev_priv = dev->dev_private; 2020 u64 power; 2021 u32 units; 2022 2023 if (INTEL_INFO(dev)->gen < 6) 2024 return -ENODEV; 2025 2026 intel_runtime_pm_get(dev_priv); 2027 2028 rdmsrl(MSR_RAPL_POWER_UNIT, power); 2029 power = (power & 0x1f00) >> 8; 2030 units = 1000000 / (1 << power); /* convert to uJ */ 2031 power = I915_READ(MCH_SECP_NRG_STTS); 2032 power *= units; 2033 2034 intel_runtime_pm_put(dev_priv); 2035 2036 seq_printf(m, "%llu", (long long unsigned)power); 2037 2038 return 0; 2039 } 2040 2041 static int i915_pc8_status(struct seq_file *m, void *unused) 2042 { 2043 struct drm_info_node *node = (struct drm_info_node *) m->private; 2044 struct drm_device *dev = node->minor->dev; 2045 struct drm_i915_private *dev_priv = dev->dev_private; 2046 2047 if 
(!IS_HASWELL(dev)) { 2048 seq_puts(m, "not supported\n"); 2049 return 0; 2050 } 2051 2052 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); 2053 seq_printf(m, "IRQs disabled: %s\n", 2054 yesno(dev_priv->pm.irqs_disabled)); 2055 2056 return 0; 2057 } 2058 2059 static const char *power_domain_str(enum intel_display_power_domain domain) 2060 { 2061 switch (domain) { 2062 case POWER_DOMAIN_PIPE_A: 2063 return "PIPE_A"; 2064 case POWER_DOMAIN_PIPE_B: 2065 return "PIPE_B"; 2066 case POWER_DOMAIN_PIPE_C: 2067 return "PIPE_C"; 2068 case POWER_DOMAIN_PIPE_A_PANEL_FITTER: 2069 return "PIPE_A_PANEL_FITTER"; 2070 case POWER_DOMAIN_PIPE_B_PANEL_FITTER: 2071 return "PIPE_B_PANEL_FITTER"; 2072 case POWER_DOMAIN_PIPE_C_PANEL_FITTER: 2073 return "PIPE_C_PANEL_FITTER"; 2074 case POWER_DOMAIN_TRANSCODER_A: 2075 return "TRANSCODER_A"; 2076 case POWER_DOMAIN_TRANSCODER_B: 2077 return "TRANSCODER_B"; 2078 case POWER_DOMAIN_TRANSCODER_C: 2079 return "TRANSCODER_C"; 2080 case POWER_DOMAIN_TRANSCODER_EDP: 2081 return "TRANSCODER_EDP"; 2082 case POWER_DOMAIN_PORT_DDI_A_2_LANES: 2083 return "PORT_DDI_A_2_LANES"; 2084 case POWER_DOMAIN_PORT_DDI_A_4_LANES: 2085 return "PORT_DDI_A_4_LANES"; 2086 case POWER_DOMAIN_PORT_DDI_B_2_LANES: 2087 return "PORT_DDI_B_2_LANES"; 2088 case POWER_DOMAIN_PORT_DDI_B_4_LANES: 2089 return "PORT_DDI_B_4_LANES"; 2090 case POWER_DOMAIN_PORT_DDI_C_2_LANES: 2091 return "PORT_DDI_C_2_LANES"; 2092 case POWER_DOMAIN_PORT_DDI_C_4_LANES: 2093 return "PORT_DDI_C_4_LANES"; 2094 case POWER_DOMAIN_PORT_DDI_D_2_LANES: 2095 return "PORT_DDI_D_2_LANES"; 2096 case POWER_DOMAIN_PORT_DDI_D_4_LANES: 2097 return "PORT_DDI_D_4_LANES"; 2098 case POWER_DOMAIN_PORT_DSI: 2099 return "PORT_DSI"; 2100 case POWER_DOMAIN_PORT_CRT: 2101 return "PORT_CRT"; 2102 case POWER_DOMAIN_PORT_OTHER: 2103 return "PORT_OTHER"; 2104 case POWER_DOMAIN_VGA: 2105 return "VGA"; 2106 case POWER_DOMAIN_AUDIO: 2107 return "AUDIO"; 2108 case POWER_DOMAIN_INIT: 2109 return "INIT"; 2110 default: 2111 WARN_ON(1); 2112 return "?"; 2113 } 2114 } 2115 2116 static int i915_power_domain_info(struct seq_file *m, void *unused) 2117 { 2118 struct drm_info_node *node = (struct drm_info_node *) m->private; 2119 struct drm_device *dev = node->minor->dev; 2120 struct drm_i915_private *dev_priv = dev->dev_private; 2121 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2122 int i; 2123 2124 mutex_lock(&power_domains->lock); 2125 2126 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2127 for (i = 0; i < power_domains->power_well_count; i++) { 2128 struct i915_power_well *power_well; 2129 enum intel_display_power_domain power_domain; 2130 2131 power_well = &power_domains->power_wells[i]; 2132 seq_printf(m, "%-25s %d\n", power_well->name, 2133 power_well->count); 2134 2135 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM; 2136 power_domain++) { 2137 if (!(BIT(power_domain) & power_well->domains)) 2138 continue; 2139 2140 seq_printf(m, " %-23s %d\n", 2141 power_domain_str(power_domain), 2142 power_domains->domain_use_count[power_domain]); 2143 } 2144 } 2145 2146 mutex_unlock(&power_domains->lock); 2147 2148 return 0; 2149 } 2150 2151 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2152 struct drm_display_mode *mode) 2153 { 2154 int i; 2155 2156 for (i = 0; i < tabs; i++) 2157 seq_putc(m, '\t'); 2158 2159 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2160 mode->base.id, mode->name, 2161 mode->vrefresh, 
mode->clock, 2162 mode->hdisplay, mode->hsync_start, 2163 mode->hsync_end, mode->htotal, 2164 mode->vdisplay, mode->vsync_start, 2165 mode->vsync_end, mode->vtotal, 2166 mode->type, mode->flags); 2167 } 2168 2169 static void intel_encoder_info(struct seq_file *m, 2170 struct intel_crtc *intel_crtc, 2171 struct intel_encoder *intel_encoder) 2172 { 2173 struct drm_info_node *node = (struct drm_info_node *) m->private; 2174 struct drm_device *dev = node->minor->dev; 2175 struct drm_crtc *crtc = &intel_crtc->base; 2176 struct intel_connector *intel_connector; 2177 struct drm_encoder *encoder; 2178 2179 encoder = &intel_encoder->base; 2180 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2181 encoder->base.id, drm_get_encoder_name(encoder)); 2182 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2183 struct drm_connector *connector = &intel_connector->base; 2184 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2185 connector->base.id, 2186 drm_get_connector_name(connector), 2187 drm_get_connector_status_name(connector->status)); 2188 if (connector->status == connector_status_connected) { 2189 struct drm_display_mode *mode = &crtc->mode; 2190 seq_printf(m, ", mode:\n"); 2191 intel_seq_print_mode(m, 2, mode); 2192 } else { 2193 seq_putc(m, '\n'); 2194 } 2195 } 2196 } 2197 2198 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2199 { 2200 struct drm_info_node *node = (struct drm_info_node *) m->private; 2201 struct drm_device *dev = node->minor->dev; 2202 struct drm_crtc *crtc = &intel_crtc->base; 2203 struct intel_encoder *intel_encoder; 2204 2205 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2206 crtc->primary->fb->base.id, crtc->x, crtc->y, 2207 crtc->primary->fb->width, crtc->primary->fb->height); 2208 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2209 intel_encoder_info(m, intel_crtc, intel_encoder); 2210 } 2211 2212 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2213 { 2214 struct drm_display_mode *mode = panel->fixed_mode; 2215 2216 seq_printf(m, "\tfixed mode:\n"); 2217 intel_seq_print_mode(m, 2, mode); 2218 } 2219 2220 static void intel_dp_info(struct seq_file *m, 2221 struct intel_connector *intel_connector) 2222 { 2223 struct intel_encoder *intel_encoder = intel_connector->encoder; 2224 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2225 2226 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2227 seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" : 2228 "no"); 2229 if (intel_encoder->type == INTEL_OUTPUT_EDP) 2230 intel_panel_info(m, &intel_connector->panel); 2231 } 2232 2233 static void intel_hdmi_info(struct seq_file *m, 2234 struct intel_connector *intel_connector) 2235 { 2236 struct intel_encoder *intel_encoder = intel_connector->encoder; 2237 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2238 2239 seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? 
"yes" : 2240 "no"); 2241 } 2242 2243 static void intel_lvds_info(struct seq_file *m, 2244 struct intel_connector *intel_connector) 2245 { 2246 intel_panel_info(m, &intel_connector->panel); 2247 } 2248 2249 static void intel_connector_info(struct seq_file *m, 2250 struct drm_connector *connector) 2251 { 2252 struct intel_connector *intel_connector = to_intel_connector(connector); 2253 struct intel_encoder *intel_encoder = intel_connector->encoder; 2254 struct drm_display_mode *mode; 2255 2256 seq_printf(m, "connector %d: type %s, status: %s\n", 2257 connector->base.id, drm_get_connector_name(connector), 2258 drm_get_connector_status_name(connector->status)); 2259 if (connector->status == connector_status_connected) { 2260 seq_printf(m, "\tname: %s\n", connector->display_info.name); 2261 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2262 connector->display_info.width_mm, 2263 connector->display_info.height_mm); 2264 seq_printf(m, "\tsubpixel order: %s\n", 2265 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2266 seq_printf(m, "\tCEA rev: %d\n", 2267 connector->display_info.cea_rev); 2268 } 2269 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2270 intel_encoder->type == INTEL_OUTPUT_EDP) 2271 intel_dp_info(m, intel_connector); 2272 else if (intel_encoder->type == INTEL_OUTPUT_HDMI) 2273 intel_hdmi_info(m, intel_connector); 2274 else if (intel_encoder->type == INTEL_OUTPUT_LVDS) 2275 intel_lvds_info(m, intel_connector); 2276 2277 seq_printf(m, "\tmodes:\n"); 2278 list_for_each_entry(mode, &connector->modes, head) 2279 intel_seq_print_mode(m, 2, mode); 2280 } 2281 2282 static bool cursor_active(struct drm_device *dev, int pipe) 2283 { 2284 struct drm_i915_private *dev_priv = dev->dev_private; 2285 u32 state; 2286 2287 if (IS_845G(dev) || IS_I865G(dev)) 2288 state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 2289 else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) 2290 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 2291 else 2292 state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; 2293 2294 return state; 2295 } 2296 2297 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) 2298 { 2299 struct drm_i915_private *dev_priv = dev->dev_private; 2300 u32 pos; 2301 2302 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) 2303 pos = I915_READ(CURPOS_IVB(pipe)); 2304 else 2305 pos = I915_READ(CURPOS(pipe)); 2306 2307 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; 2308 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) 2309 *x = -*x; 2310 2311 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; 2312 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) 2313 *y = -*y; 2314 2315 return cursor_active(dev, pipe); 2316 } 2317 2318 static int i915_display_info(struct seq_file *m, void *unused) 2319 { 2320 struct drm_info_node *node = (struct drm_info_node *) m->private; 2321 struct drm_device *dev = node->minor->dev; 2322 struct drm_i915_private *dev_priv = dev->dev_private; 2323 struct intel_crtc *crtc; 2324 struct drm_connector *connector; 2325 2326 intel_runtime_pm_get(dev_priv); 2327 drm_modeset_lock_all(dev); 2328 seq_printf(m, "CRTC info\n"); 2329 seq_printf(m, "---------\n"); 2330 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 2331 bool active; 2332 int x, y; 2333 2334 seq_printf(m, "CRTC %d: pipe: %c, active: %s\n", 2335 crtc->base.base.id, pipe_name(crtc->pipe), 2336 yesno(crtc->active)); 2337 if (crtc->active) { 2338 intel_crtc_info(m, crtc); 2339 2340 active = cursor_position(dev, crtc->pipe, &x, &y); 2341 seq_printf(m, 
"\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n", 2342 yesno(crtc->cursor_visible), 2343 x, y, crtc->cursor_addr, 2344 yesno(active)); 2345 } 2346 } 2347 2348 seq_printf(m, "\n"); 2349 seq_printf(m, "Connector info\n"); 2350 seq_printf(m, "--------------\n"); 2351 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2352 intel_connector_info(m, connector); 2353 } 2354 drm_modeset_unlock_all(dev); 2355 intel_runtime_pm_put(dev_priv); 2356 2357 return 0; 2358 } 2359 2360 struct pipe_crc_info { 2361 const char *name; 2362 struct drm_device *dev; 2363 enum pipe pipe; 2364 }; 2365 2366 static int i915_pipe_crc_open(struct inode *inode, struct file *filep) 2367 { 2368 struct pipe_crc_info *info = inode->i_private; 2369 struct drm_i915_private *dev_priv = info->dev->dev_private; 2370 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2371 2372 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) 2373 return -ENODEV; 2374 2375 spin_lock_irq(&pipe_crc->lock); 2376 2377 if (pipe_crc->opened) { 2378 spin_unlock_irq(&pipe_crc->lock); 2379 return -EBUSY; /* already open */ 2380 } 2381 2382 pipe_crc->opened = true; 2383 filep->private_data = inode->i_private; 2384 2385 spin_unlock_irq(&pipe_crc->lock); 2386 2387 return 0; 2388 } 2389 2390 static int i915_pipe_crc_release(struct inode *inode, struct file *filep) 2391 { 2392 struct pipe_crc_info *info = inode->i_private; 2393 struct drm_i915_private *dev_priv = info->dev->dev_private; 2394 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2395 2396 spin_lock_irq(&pipe_crc->lock); 2397 pipe_crc->opened = false; 2398 spin_unlock_irq(&pipe_crc->lock); 2399 2400 return 0; 2401 } 2402 2403 /* (6 fields, 8 chars each, space separated (5) + '\n') */ 2404 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) 2405 /* account for \'0' */ 2406 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) 2407 2408 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) 2409 { 2410 assert_spin_locked(&pipe_crc->lock); 2411 return CIRC_CNT(pipe_crc->head, pipe_crc->tail, 2412 INTEL_PIPE_CRC_ENTRIES_NR); 2413 } 2414 2415 static ssize_t 2416 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, 2417 loff_t *pos) 2418 { 2419 struct pipe_crc_info *info = filep->private_data; 2420 struct drm_device *dev = info->dev; 2421 struct drm_i915_private *dev_priv = dev->dev_private; 2422 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2423 char buf[PIPE_CRC_BUFFER_LEN]; 2424 int head, tail, n_entries, n; 2425 ssize_t bytes_read; 2426 2427 /* 2428 * Don't allow user space to provide buffers not big enough to hold 2429 * a line of data. 
2430 */ 2431 if (count < PIPE_CRC_LINE_LEN) 2432 return -EINVAL; 2433 2434 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) 2435 return 0; 2436 2437 /* nothing to read */ 2438 spin_lock_irq(&pipe_crc->lock); 2439 while (pipe_crc_data_count(pipe_crc) == 0) { 2440 int ret; 2441 2442 if (filep->f_flags & O_NONBLOCK) { 2443 spin_unlock_irq(&pipe_crc->lock); 2444 return -EAGAIN; 2445 } 2446 2447 ret = wait_event_interruptible_lock_irq(pipe_crc->wq, 2448 pipe_crc_data_count(pipe_crc), pipe_crc->lock); 2449 if (ret) { 2450 spin_unlock_irq(&pipe_crc->lock); 2451 return ret; 2452 } 2453 } 2454 2455 /* We now have one or more entries to read */ 2456 head = pipe_crc->head; 2457 tail = pipe_crc->tail; 2458 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR), 2459 count / PIPE_CRC_LINE_LEN); 2460 spin_unlock_irq(&pipe_crc->lock); 2461 2462 bytes_read = 0; 2463 n = 0; 2464 do { 2465 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail]; 2466 int ret; 2467 2468 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, 2469 "%8u %8x %8x %8x %8x %8x\n", 2470 entry->frame, entry->crc[0], 2471 entry->crc[1], entry->crc[2], 2472 entry->crc[3], entry->crc[4]); 2473 2474 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN, 2475 buf, PIPE_CRC_LINE_LEN); 2476 if (ret == PIPE_CRC_LINE_LEN) 2477 return -EFAULT; 2478 2479 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); 2480 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 2481 n++; 2482 } while (--n_entries); 2483 2484 spin_lock_irq(&pipe_crc->lock); 2485 pipe_crc->tail = tail; 2486 spin_unlock_irq(&pipe_crc->lock); 2487 2488 return bytes_read; 2489 } 2490 2491 static const struct file_operations i915_pipe_crc_fops = { 2492 .owner = THIS_MODULE, 2493 .open = i915_pipe_crc_open, 2494 .read = i915_pipe_crc_read, 2495 .release = i915_pipe_crc_release, 2496 }; 2497 2498 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { 2499 { 2500 .name = "i915_pipe_A_crc", 2501 .pipe = PIPE_A, 2502 }, 2503 { 2504 .name = "i915_pipe_B_crc", 2505 .pipe = PIPE_B, 2506 }, 2507 { 2508 .name = "i915_pipe_C_crc", 2509 .pipe = PIPE_C, 2510 }, 2511 }; 2512 2513 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, 2514 enum pipe pipe) 2515 { 2516 struct drm_device *dev = minor->dev; 2517 struct dentry *ent; 2518 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; 2519 2520 info->dev = dev; 2521 ent = debugfs_create_file(info->name, S_IRUGO, root, info, 2522 &i915_pipe_crc_fops); 2523 if (!ent) 2524 return -ENOMEM; 2525 2526 return drm_add_fake_info_node(minor, ent, info); 2527 } 2528 2529 static const char * const pipe_crc_sources[] = { 2530 "none", 2531 "plane1", 2532 "plane2", 2533 "pf", 2534 "pipe", 2535 "TV", 2536 "DP-B", 2537 "DP-C", 2538 "DP-D", 2539 "auto", 2540 }; 2541 2542 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) 2543 { 2544 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); 2545 return pipe_crc_sources[source]; 2546 } 2547 2548 static int display_crc_ctl_show(struct seq_file *m, void *data) 2549 { 2550 struct drm_device *dev = m->private; 2551 struct drm_i915_private *dev_priv = dev->dev_private; 2552 int i; 2553 2554 for (i = 0; i < I915_MAX_PIPES; i++) 2555 seq_printf(m, "%c %s\n", pipe_name(i), 2556 pipe_crc_source_name(dev_priv->pipe_crc[i].source)); 2557 2558 return 0; 2559 } 2560 2561 static int display_crc_ctl_open(struct inode *inode, struct file *file) 2562 { 2563 struct drm_device *dev = inode->i_private; 2564 2565 return single_open(file, 
display_crc_ctl_show, dev); 2566 } 2567 2568 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2569 uint32_t *val) 2570 { 2571 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2572 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2573 2574 switch (*source) { 2575 case INTEL_PIPE_CRC_SOURCE_PIPE: 2576 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; 2577 break; 2578 case INTEL_PIPE_CRC_SOURCE_NONE: 2579 *val = 0; 2580 break; 2581 default: 2582 return -EINVAL; 2583 } 2584 2585 return 0; 2586 } 2587 2588 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, 2589 enum intel_pipe_crc_source *source) 2590 { 2591 struct intel_encoder *encoder; 2592 struct intel_crtc *crtc; 2593 struct intel_digital_port *dig_port; 2594 int ret = 0; 2595 2596 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2597 2598 mutex_lock(&dev->mode_config.mutex); 2599 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 2600 base.head) { 2601 if (!encoder->base.crtc) 2602 continue; 2603 2604 crtc = to_intel_crtc(encoder->base.crtc); 2605 2606 if (crtc->pipe != pipe) 2607 continue; 2608 2609 switch (encoder->type) { 2610 case INTEL_OUTPUT_TVOUT: 2611 *source = INTEL_PIPE_CRC_SOURCE_TV; 2612 break; 2613 case INTEL_OUTPUT_DISPLAYPORT: 2614 case INTEL_OUTPUT_EDP: 2615 dig_port = enc_to_dig_port(&encoder->base); 2616 switch (dig_port->port) { 2617 case PORT_B: 2618 *source = INTEL_PIPE_CRC_SOURCE_DP_B; 2619 break; 2620 case PORT_C: 2621 *source = INTEL_PIPE_CRC_SOURCE_DP_C; 2622 break; 2623 case PORT_D: 2624 *source = INTEL_PIPE_CRC_SOURCE_DP_D; 2625 break; 2626 default: 2627 WARN(1, "nonexisting DP port %c\n", 2628 port_name(dig_port->port)); 2629 break; 2630 } 2631 break; 2632 } 2633 } 2634 mutex_unlock(&dev->mode_config.mutex); 2635 2636 return ret; 2637 } 2638 2639 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, 2640 enum pipe pipe, 2641 enum intel_pipe_crc_source *source, 2642 uint32_t *val) 2643 { 2644 struct drm_i915_private *dev_priv = dev->dev_private; 2645 bool need_stable_symbols = false; 2646 2647 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 2648 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 2649 if (ret) 2650 return ret; 2651 } 2652 2653 switch (*source) { 2654 case INTEL_PIPE_CRC_SOURCE_PIPE: 2655 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; 2656 break; 2657 case INTEL_PIPE_CRC_SOURCE_DP_B: 2658 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; 2659 need_stable_symbols = true; 2660 break; 2661 case INTEL_PIPE_CRC_SOURCE_DP_C: 2662 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; 2663 need_stable_symbols = true; 2664 break; 2665 case INTEL_PIPE_CRC_SOURCE_NONE: 2666 *val = 0; 2667 break; 2668 default: 2669 return -EINVAL; 2670 } 2671 2672 /* 2673 * When the pipe CRC tap point is after the transcoders we need 2674 * to tweak symbol-level features to produce a deterministic series of 2675 * symbols for a given frame. 
We need to reset those features only once 2676 * a frame (instead of every nth symbol): 2677 * - DC-balance: used to ensure a better clock recovery from the data 2678 * link (SDVO) 2679 * - DisplayPort scrambling: used for EMI reduction 2680 */ 2681 if (need_stable_symbols) { 2682 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2683 2684 tmp |= DC_BALANCE_RESET_VLV; 2685 if (pipe == PIPE_A) 2686 tmp |= PIPE_A_SCRAMBLE_RESET; 2687 else 2688 tmp |= PIPE_B_SCRAMBLE_RESET; 2689 2690 I915_WRITE(PORT_DFT2_G4X, tmp); 2691 } 2692 2693 return 0; 2694 } 2695 2696 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, 2697 enum pipe pipe, 2698 enum intel_pipe_crc_source *source, 2699 uint32_t *val) 2700 { 2701 struct drm_i915_private *dev_priv = dev->dev_private; 2702 bool need_stable_symbols = false; 2703 2704 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 2705 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 2706 if (ret) 2707 return ret; 2708 } 2709 2710 switch (*source) { 2711 case INTEL_PIPE_CRC_SOURCE_PIPE: 2712 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; 2713 break; 2714 case INTEL_PIPE_CRC_SOURCE_TV: 2715 if (!SUPPORTS_TV(dev)) 2716 return -EINVAL; 2717 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; 2718 break; 2719 case INTEL_PIPE_CRC_SOURCE_DP_B: 2720 if (!IS_G4X(dev)) 2721 return -EINVAL; 2722 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; 2723 need_stable_symbols = true; 2724 break; 2725 case INTEL_PIPE_CRC_SOURCE_DP_C: 2726 if (!IS_G4X(dev)) 2727 return -EINVAL; 2728 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; 2729 need_stable_symbols = true; 2730 break; 2731 case INTEL_PIPE_CRC_SOURCE_DP_D: 2732 if (!IS_G4X(dev)) 2733 return -EINVAL; 2734 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; 2735 need_stable_symbols = true; 2736 break; 2737 case INTEL_PIPE_CRC_SOURCE_NONE: 2738 *val = 0; 2739 break; 2740 default: 2741 return -EINVAL; 2742 } 2743 2744 /* 2745 * When the pipe CRC tap point is after the transcoders we need 2746 * to tweak symbol-level features to produce a deterministic series of 2747 * symbols for a given frame. 
We need to reset those features only once 2748 * a frame (instead of every nth symbol): 2749 * - DC-balance: used to ensure a better clock recovery from the data 2750 * link (SDVO) 2751 * - DisplayPort scrambling: used for EMI reduction 2752 */ 2753 if (need_stable_symbols) { 2754 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2755 2756 WARN_ON(!IS_G4X(dev)); 2757 2758 I915_WRITE(PORT_DFT_I9XX, 2759 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); 2760 2761 if (pipe == PIPE_A) 2762 tmp |= PIPE_A_SCRAMBLE_RESET; 2763 else 2764 tmp |= PIPE_B_SCRAMBLE_RESET; 2765 2766 I915_WRITE(PORT_DFT2_G4X, tmp); 2767 } 2768 2769 return 0; 2770 } 2771 2772 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, 2773 enum pipe pipe) 2774 { 2775 struct drm_i915_private *dev_priv = dev->dev_private; 2776 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2777 2778 if (pipe == PIPE_A) 2779 tmp &= ~PIPE_A_SCRAMBLE_RESET; 2780 else 2781 tmp &= ~PIPE_B_SCRAMBLE_RESET; 2782 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) 2783 tmp &= ~DC_BALANCE_RESET_VLV; 2784 I915_WRITE(PORT_DFT2_G4X, tmp); 2785 2786 } 2787 2788 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, 2789 enum pipe pipe) 2790 { 2791 struct drm_i915_private *dev_priv = dev->dev_private; 2792 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2793 2794 if (pipe == PIPE_A) 2795 tmp &= ~PIPE_A_SCRAMBLE_RESET; 2796 else 2797 tmp &= ~PIPE_B_SCRAMBLE_RESET; 2798 I915_WRITE(PORT_DFT2_G4X, tmp); 2799 2800 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { 2801 I915_WRITE(PORT_DFT_I9XX, 2802 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); 2803 } 2804 } 2805 2806 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2807 uint32_t *val) 2808 { 2809 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2810 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2811 2812 switch (*source) { 2813 case INTEL_PIPE_CRC_SOURCE_PLANE1: 2814 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; 2815 break; 2816 case INTEL_PIPE_CRC_SOURCE_PLANE2: 2817 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; 2818 break; 2819 case INTEL_PIPE_CRC_SOURCE_PIPE: 2820 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; 2821 break; 2822 case INTEL_PIPE_CRC_SOURCE_NONE: 2823 *val = 0; 2824 break; 2825 default: 2826 return -EINVAL; 2827 } 2828 2829 return 0; 2830 } 2831 2832 static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2833 uint32_t *val) 2834 { 2835 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2836 *source = INTEL_PIPE_CRC_SOURCE_PF; 2837 2838 switch (*source) { 2839 case INTEL_PIPE_CRC_SOURCE_PLANE1: 2840 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; 2841 break; 2842 case INTEL_PIPE_CRC_SOURCE_PLANE2: 2843 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; 2844 break; 2845 case INTEL_PIPE_CRC_SOURCE_PF: 2846 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; 2847 break; 2848 case INTEL_PIPE_CRC_SOURCE_NONE: 2849 *val = 0; 2850 break; 2851 default: 2852 return -EINVAL; 2853 } 2854 2855 return 0; 2856 } 2857 2858 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, 2859 enum intel_pipe_crc_source source) 2860 { 2861 struct drm_i915_private *dev_priv = dev->dev_private; 2862 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 2863 u32 val = 0; /* shut up gcc */ 2864 int ret; 2865 2866 if (pipe_crc->source == source) 2867 return 0; 2868 2869 /* forbid changing the source without going back to 'none' */ 2870 if (pipe_crc->source && source) 2871 return -EINVAL; 2872 2873 if (IS_GEN2(dev)) 2874 ret = i8xx_pipe_crc_ctl_reg(&source, &val); 2875 else if (INTEL_INFO(dev)->gen < 5) 
2876 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); 2877 else if (IS_VALLEYVIEW(dev)) 2878 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); 2879 else if (IS_GEN5(dev) || IS_GEN6(dev)) 2880 ret = ilk_pipe_crc_ctl_reg(&source, &val); 2881 else 2882 ret = ivb_pipe_crc_ctl_reg(&source, &val); 2883 2884 if (ret != 0) 2885 return ret; 2886 2887 /* none -> real source transition */ 2888 if (source) { 2889 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", 2890 pipe_name(pipe), pipe_crc_source_name(source)); 2891 2892 pipe_crc->entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, 2893 sizeof(*pipe_crc->entries), 2894 GFP_KERNEL); 2895 if (!pipe_crc->entries) 2896 return -ENOMEM; 2897 2898 spin_lock_irq(&pipe_crc->lock); 2899 pipe_crc->head = 0; 2900 pipe_crc->tail = 0; 2901 spin_unlock_irq(&pipe_crc->lock); 2902 } 2903 2904 pipe_crc->source = source; 2905 2906 I915_WRITE(PIPE_CRC_CTL(pipe), val); 2907 POSTING_READ(PIPE_CRC_CTL(pipe)); 2908 2909 /* real source -> none transition */ 2910 if (source == INTEL_PIPE_CRC_SOURCE_NONE) { 2911 struct intel_pipe_crc_entry *entries; 2912 2913 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", 2914 pipe_name(pipe)); 2915 2916 intel_wait_for_vblank(dev, pipe); 2917 2918 spin_lock_irq(&pipe_crc->lock); 2919 entries = pipe_crc->entries; 2920 pipe_crc->entries = NULL; 2921 spin_unlock_irq(&pipe_crc->lock); 2922 2923 kfree(entries); 2924 2925 if (IS_G4X(dev)) 2926 g4x_undo_pipe_scramble_reset(dev, pipe); 2927 else if (IS_VALLEYVIEW(dev)) 2928 vlv_undo_pipe_scramble_reset(dev, pipe); 2929 } 2930 2931 return 0; 2932 } 2933 2934 /* 2935 * Parse pipe CRC command strings: 2936 * command: wsp* object wsp+ name wsp+ source wsp* 2937 * object: 'pipe' 2938 * name: (A | B | C) 2939 * source: (none | plane1 | plane2 | pf | pipe | TV | DP-B | DP-C | DP-D | auto) 2940 * wsp: (#0x20 | #0x9 | #0xA)+ 2941 * 2942 * e.g.: 2943 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A 2944 * "pipe A none" -> Stop CRC 2945 */ 2946 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) 2947 { 2948 int n_words = 0; 2949 2950 while (*buf) { 2951 char *end; 2952 2953 /* skip leading white space */ 2954 buf = skip_spaces(buf); 2955 if (!*buf) 2956 break; /* end of buffer */ 2957 2958 /* find end of word */ 2959 for (end = buf; *end && !isspace(*end); end++) 2960 ; 2961 2962 if (n_words == max_words) { 2963 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", 2964 max_words); 2965 return -EINVAL; /* ran out of words[] before bytes */ 2966 } 2967 2968 if (*end) 2969 *end++ = '\0'; 2970 words[n_words++] = buf; 2971 buf = end; 2972 } 2973 2974 return n_words; 2975 } 2976 2977 enum intel_pipe_crc_object { 2978 PIPE_CRC_OBJECT_PIPE, 2979 }; 2980 2981 static const char * const pipe_crc_objects[] = { 2982 "pipe", 2983 }; 2984 2985 static int 2986 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) 2987 { 2988 int i; 2989 2990 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) 2991 if (!strcmp(buf, pipe_crc_objects[i])) { 2992 *o = i; 2993 return 0; 2994 } 2995 2996 return -EINVAL; 2997 } 2998 2999 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) 3000 { 3001 const char name = buf[0]; 3002 3003 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) 3004 return -EINVAL; 3005 3006 *pipe = name - 'A'; 3007 3008 return 0; 3009 } 3010 3011 static int 3012 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) 3013 { 3014 int i; 3015 3016 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) 3017 if (!strcmp(buf, pipe_crc_sources[i])) { 3018 *s = i;
3019 return 0; 3020 } 3021 3022 return -EINVAL; 3023 } 3024 3025 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len) 3026 { 3027 #define N_WORDS 3 3028 int n_words; 3029 char *words[N_WORDS]; 3030 enum pipe pipe; 3031 enum intel_pipe_crc_object object; 3032 enum intel_pipe_crc_source source; 3033 3034 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); 3035 if (n_words != N_WORDS) { 3036 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", 3037 N_WORDS); 3038 return -EINVAL; 3039 } 3040 3041 if (display_crc_ctl_parse_object(words[0], &object) < 0) { 3042 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); 3043 return -EINVAL; 3044 } 3045 3046 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { 3047 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); 3048 return -EINVAL; 3049 } 3050 3051 if (display_crc_ctl_parse_source(words[2], &source) < 0) { 3052 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); 3053 return -EINVAL; 3054 } 3055 3056 return pipe_crc_set_source(dev, pipe, source); 3057 } 3058 3059 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, 3060 size_t len, loff_t *offp) 3061 { 3062 struct seq_file *m = file->private_data; 3063 struct drm_device *dev = m->private; 3064 char *tmpbuf; 3065 int ret; 3066 3067 if (len == 0) 3068 return 0; 3069 3070 if (len > PAGE_SIZE - 1) { 3071 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", 3072 PAGE_SIZE); 3073 return -E2BIG; 3074 } 3075 3076 tmpbuf = kmalloc(len + 1, GFP_KERNEL); 3077 if (!tmpbuf) 3078 return -ENOMEM; 3079 3080 if (copy_from_user(tmpbuf, ubuf, len)) { 3081 ret = -EFAULT; 3082 goto out; 3083 } 3084 tmpbuf[len] = '\0'; 3085 3086 ret = display_crc_ctl_parse(dev, tmpbuf, len); 3087 3088 out: 3089 kfree(tmpbuf); 3090 if (ret < 0) 3091 return ret; 3092 3093 *offp += len; 3094 return len; 3095 } 3096 3097 static const struct file_operations i915_display_crc_ctl_fops = { 3098 .owner = THIS_MODULE, 3099 .open = display_crc_ctl_open, 3100 .read = seq_read, 3101 .llseek = seq_lseek, 3102 .release = single_release, 3103 .write = display_crc_ctl_write 3104 }; 3105 3106 static void wm_latency_show(struct seq_file *m, const uint16_t wm[5]) 3107 { 3108 struct drm_device *dev = m->private; 3109 int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 
5 : 4; 3110 int level; 3111 3112 drm_modeset_lock_all(dev); 3113 3114 for (level = 0; level < num_levels; level++) { 3115 unsigned int latency = wm[level]; 3116 3117 /* WM1+ latency values in 0.5us units */ 3118 if (level > 0) 3119 latency *= 5; 3120 3121 seq_printf(m, "WM%d %u (%u.%u usec)\n", 3122 level, wm[level], 3123 latency / 10, latency % 10); 3124 } 3125 3126 drm_modeset_unlock_all(dev); 3127 } 3128 3129 static int pri_wm_latency_show(struct seq_file *m, void *data) 3130 { 3131 struct drm_device *dev = m->private; 3132 3133 wm_latency_show(m, to_i915(dev)->wm.pri_latency); 3134 3135 return 0; 3136 } 3137 3138 static int spr_wm_latency_show(struct seq_file *m, void *data) 3139 { 3140 struct drm_device *dev = m->private; 3141 3142 wm_latency_show(m, to_i915(dev)->wm.spr_latency); 3143 3144 return 0; 3145 } 3146 3147 static int cur_wm_latency_show(struct seq_file *m, void *data) 3148 { 3149 struct drm_device *dev = m->private; 3150 3151 wm_latency_show(m, to_i915(dev)->wm.cur_latency); 3152 3153 return 0; 3154 } 3155 3156 static int pri_wm_latency_open(struct inode *inode, struct file *file) 3157 { 3158 struct drm_device *dev = inode->i_private; 3159 3160 if (!HAS_PCH_SPLIT(dev)) 3161 return -ENODEV; 3162 3163 return single_open(file, pri_wm_latency_show, dev); 3164 } 3165 3166 static int spr_wm_latency_open(struct inode *inode, struct file *file) 3167 { 3168 struct drm_device *dev = inode->i_private; 3169 3170 if (!HAS_PCH_SPLIT(dev)) 3171 return -ENODEV; 3172 3173 return single_open(file, spr_wm_latency_show, dev); 3174 } 3175 3176 static int cur_wm_latency_open(struct inode *inode, struct file *file) 3177 { 3178 struct drm_device *dev = inode->i_private; 3179 3180 if (!HAS_PCH_SPLIT(dev)) 3181 return -ENODEV; 3182 3183 return single_open(file, cur_wm_latency_show, dev); 3184 } 3185 3186 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 3187 size_t len, loff_t *offp, uint16_t wm[5]) 3188 { 3189 struct seq_file *m = file->private_data; 3190 struct drm_device *dev = m->private; 3191 uint16_t new[5] = { 0 }; 3192 int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 
5 : 4; 3193 int level; 3194 int ret; 3195 char tmp[32]; 3196 3197 if (len >= sizeof(tmp)) 3198 return -EINVAL; 3199 3200 if (copy_from_user(tmp, ubuf, len)) 3201 return -EFAULT; 3202 3203 tmp[len] = '\0'; 3204 3205 ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]); 3206 if (ret != num_levels) 3207 return -EINVAL; 3208 3209 drm_modeset_lock_all(dev); 3210 3211 for (level = 0; level < num_levels; level++) 3212 wm[level] = new[level]; 3213 3214 drm_modeset_unlock_all(dev); 3215 3216 return len; 3217 } 3218 3219 3220 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 3221 size_t len, loff_t *offp) 3222 { 3223 struct seq_file *m = file->private_data; 3224 struct drm_device *dev = m->private; 3225 3226 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency); 3227 } 3228 3229 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 3230 size_t len, loff_t *offp) 3231 { 3232 struct seq_file *m = file->private_data; 3233 struct drm_device *dev = m->private; 3234 3235 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency); 3236 } 3237 3238 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 3239 size_t len, loff_t *offp) 3240 { 3241 struct seq_file *m = file->private_data; 3242 struct drm_device *dev = m->private; 3243 3244 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency); 3245 } 3246 3247 static const struct file_operations i915_pri_wm_latency_fops = { 3248 .owner = THIS_MODULE, 3249 .open = pri_wm_latency_open, 3250 .read = seq_read, 3251 .llseek = seq_lseek, 3252 .release = single_release, 3253 .write = pri_wm_latency_write 3254 }; 3255 3256 static const struct file_operations i915_spr_wm_latency_fops = { 3257 .owner = THIS_MODULE, 3258 .open = spr_wm_latency_open, 3259 .read = seq_read, 3260 .llseek = seq_lseek, 3261 .release = single_release, 3262 .write = spr_wm_latency_write 3263 }; 3264 3265 static const struct file_operations i915_cur_wm_latency_fops = { 3266 .owner = THIS_MODULE, 3267 .open = cur_wm_latency_open, 3268 .read = seq_read, 3269 .llseek = seq_lseek, 3270 .release = single_release, 3271 .write = cur_wm_latency_write 3272 }; 3273 3274 static int 3275 i915_wedged_get(void *data, u64 *val) 3276 { 3277 struct drm_device *dev = data; 3278 struct drm_i915_private *dev_priv = dev->dev_private; 3279 3280 *val = atomic_read(&dev_priv->gpu_error.reset_counter); 3281 3282 return 0; 3283 } 3284 3285 static int 3286 i915_wedged_set(void *data, u64 val) 3287 { 3288 struct drm_device *dev = data; 3289 3290 i915_handle_error(dev, val, 3291 "Manually setting wedged to %llu", val); 3292 return 0; 3293 } 3294 3295 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 3296 i915_wedged_get, i915_wedged_set, 3297 "%llu\n"); 3298 3299 static int 3300 i915_ring_stop_get(void *data, u64 *val) 3301 { 3302 struct drm_device *dev = data; 3303 struct drm_i915_private *dev_priv = dev->dev_private; 3304 3305 *val = dev_priv->gpu_error.stop_rings; 3306 3307 return 0; 3308 } 3309 3310 static int 3311 i915_ring_stop_set(void *data, u64 val) 3312 { 3313 struct drm_device *dev = data; 3314 struct drm_i915_private *dev_priv = dev->dev_private; 3315 int ret; 3316 3317 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 3318 3319 ret = mutex_lock_interruptible(&dev->struct_mutex); 3320 if (ret) 3321 return ret; 3322 3323 dev_priv->gpu_error.stop_rings = val; 3324 mutex_unlock(&dev->struct_mutex); 3325 3326 return 0; 3327 } 3328 3329 
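/*
 * Usage sketch (a hypothetical shell session, assuming debugfs is
 * mounted at /sys/kernel/debug and this device is DRM minor 0): write a
 * mask of rings to stop, e.g. 0x1 for the render ring, to fake a GPU
 * hang; write 0 to let the rings run again:
 *
 *   echo 0x1 > /sys/kernel/debug/dri/0/i915_ring_stop
 *   echo 0x0 > /sys/kernel/debug/dri/0/i915_ring_stop
 */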
DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 3330 i915_ring_stop_get, i915_ring_stop_set, 3331 "0x%08llx\n"); 3332 3333 static int 3334 i915_ring_missed_irq_get(void *data, u64 *val) 3335 { 3336 struct drm_device *dev = data; 3337 struct drm_i915_private *dev_priv = dev->dev_private; 3338 3339 *val = dev_priv->gpu_error.missed_irq_rings; 3340 return 0; 3341 } 3342 3343 static int 3344 i915_ring_missed_irq_set(void *data, u64 val) 3345 { 3346 struct drm_device *dev = data; 3347 struct drm_i915_private *dev_priv = dev->dev_private; 3348 int ret; 3349 3350 /* Lock against concurrent debugfs callers */ 3351 ret = mutex_lock_interruptible(&dev->struct_mutex); 3352 if (ret) 3353 return ret; 3354 dev_priv->gpu_error.missed_irq_rings = val; 3355 mutex_unlock(&dev->struct_mutex); 3356 3357 return 0; 3358 } 3359 3360 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 3361 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 3362 "0x%08llx\n"); 3363 3364 static int 3365 i915_ring_test_irq_get(void *data, u64 *val) 3366 { 3367 struct drm_device *dev = data; 3368 struct drm_i915_private *dev_priv = dev->dev_private; 3369 3370 *val = dev_priv->gpu_error.test_irq_rings; 3371 3372 return 0; 3373 } 3374 3375 static int 3376 i915_ring_test_irq_set(void *data, u64 val) 3377 { 3378 struct drm_device *dev = data; 3379 struct drm_i915_private *dev_priv = dev->dev_private; 3380 int ret; 3381 3382 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 3383 3384 /* Lock against concurrent debugfs callers */ 3385 ret = mutex_lock_interruptible(&dev->struct_mutex); 3386 if (ret) 3387 return ret; 3388 3389 dev_priv->gpu_error.test_irq_rings = val; 3390 mutex_unlock(&dev->struct_mutex); 3391 3392 return 0; 3393 } 3394 3395 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 3396 i915_ring_test_irq_get, i915_ring_test_irq_set, 3397 "0x%08llx\n"); 3398 3399 #define DROP_UNBOUND 0x1 3400 #define DROP_BOUND 0x2 3401 #define DROP_RETIRE 0x4 3402 #define DROP_ACTIVE 0x8 3403 #define DROP_ALL (DROP_UNBOUND | \ 3404 DROP_BOUND | \ 3405 DROP_RETIRE | \ 3406 DROP_ACTIVE) 3407 static int 3408 i915_drop_caches_get(void *data, u64 *val) 3409 { 3410 *val = DROP_ALL; 3411 3412 return 0; 3413 } 3414 3415 static int 3416 i915_drop_caches_set(void *data, u64 val) 3417 { 3418 struct drm_device *dev = data; 3419 struct drm_i915_private *dev_priv = dev->dev_private; 3420 struct drm_i915_gem_object *obj, *next; 3421 struct i915_address_space *vm; 3422 struct i915_vma *vma, *x; 3423 int ret; 3424 3425 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 3426 3427 /* No need to check and wait for gpu resets, only libdrm auto-restarts 3428 * on ioctls on -EAGAIN. 
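 *
 * A hypothetical example, assuming debugfs at /sys/kernel/debug and DRM
 * minor 0: "echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches"
 * writes DROP_ALL (0xf): idle the GPU, retire requests, unbind unpinned
 * bound objects and drop the backing pages of unbound ones.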
*/ 3429 ret = mutex_lock_interruptible(&dev->struct_mutex); 3430 if (ret) 3431 return ret; 3432 3433 if (val & DROP_ACTIVE) { 3434 ret = i915_gpu_idle(dev); 3435 if (ret) 3436 goto unlock; 3437 } 3438 3439 if (val & (DROP_RETIRE | DROP_ACTIVE)) 3440 i915_gem_retire_requests(dev); 3441 3442 if (val & DROP_BOUND) { 3443 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 3444 list_for_each_entry_safe(vma, x, &vm->inactive_list, 3445 mm_list) { 3446 if (vma->pin_count) 3447 continue; 3448 3449 ret = i915_vma_unbind(vma); 3450 if (ret) 3451 goto unlock; 3452 } 3453 } 3454 } 3455 3456 if (val & DROP_UNBOUND) { 3457 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, 3458 global_list) 3459 if (obj->pages_pin_count == 0) { 3460 ret = i915_gem_object_put_pages(obj); 3461 if (ret) 3462 goto unlock; 3463 } 3464 } 3465 3466 unlock: 3467 mutex_unlock(&dev->struct_mutex); 3468 3469 return ret; 3470 } 3471 3472 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 3473 i915_drop_caches_get, i915_drop_caches_set, 3474 "0x%08llx\n"); 3475 3476 static int 3477 i915_max_freq_get(void *data, u64 *val) 3478 { 3479 struct drm_device *dev = data; 3480 struct drm_i915_private *dev_priv = dev->dev_private; 3481 int ret; 3482 3483 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3484 return -ENODEV; 3485 3486 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3487 3488 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3489 if (ret) 3490 return ret; 3491 3492 if (IS_VALLEYVIEW(dev)) 3493 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); 3494 else 3495 *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER; 3496 mutex_unlock(&dev_priv->rps.hw_lock); 3497 3498 return 0; 3499 } 3500 3501 static int 3502 i915_max_freq_set(void *data, u64 val) 3503 { 3504 struct drm_device *dev = data; 3505 struct drm_i915_private *dev_priv = dev->dev_private; 3506 u32 rp_state_cap, hw_max, hw_min; 3507 int ret; 3508 3509 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3510 return -ENODEV; 3511 3512 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3513 3514 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 3515 3516 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3517 if (ret) 3518 return ret; 3519 3520 /* 3521 * Turbo will still be enabled, but won't go above the set value. 
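 *
 * The value is taken in MHz: on VLV it is converted with
 * vlv_freq_opcode(), otherwise it is divided by GT_FREQUENCY_MULTIPLIER
 * before being range-checked. A hypothetical example:
 * "echo 450 > /sys/kernel/debug/dri/0/i915_max_freq".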
3522 */ 3523 if (IS_VALLEYVIEW(dev)) { 3524 val = vlv_freq_opcode(dev_priv, val); 3525 3526 hw_max = valleyview_rps_max_freq(dev_priv); 3527 hw_min = valleyview_rps_min_freq(dev_priv); 3528 } else { 3529 do_div(val, GT_FREQUENCY_MULTIPLIER); 3530 3531 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 3532 hw_max = dev_priv->rps.max_freq; 3533 hw_min = (rp_state_cap >> 16) & 0xff; 3534 } 3535 3536 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { 3537 mutex_unlock(&dev_priv->rps.hw_lock); 3538 return -EINVAL; 3539 } 3540 3541 dev_priv->rps.max_freq_softlimit = val; 3542 3543 if (IS_VALLEYVIEW(dev)) 3544 valleyview_set_rps(dev, val); 3545 else 3546 gen6_set_rps(dev, val); 3547 3548 mutex_unlock(&dev_priv->rps.hw_lock); 3549 3550 return 0; 3551 } 3552 3553 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 3554 i915_max_freq_get, i915_max_freq_set, 3555 "%llu\n"); 3556 3557 static int 3558 i915_min_freq_get(void *data, u64 *val) 3559 { 3560 struct drm_device *dev = data; 3561 struct drm_i915_private *dev_priv = dev->dev_private; 3562 int ret; 3563 3564 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3565 return -ENODEV; 3566 3567 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3568 3569 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3570 if (ret) 3571 return ret; 3572 3573 if (IS_VALLEYVIEW(dev)) 3574 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); 3575 else 3576 *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER; 3577 mutex_unlock(&dev_priv->rps.hw_lock); 3578 3579 return 0; 3580 } 3581 3582 static int 3583 i915_min_freq_set(void *data, u64 val) 3584 { 3585 struct drm_device *dev = data; 3586 struct drm_i915_private *dev_priv = dev->dev_private; 3587 u32 rp_state_cap, hw_max, hw_min; 3588 int ret; 3589 3590 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3591 return -ENODEV; 3592 3593 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3594 3595 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 3596 3597 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3598 if (ret) 3599 return ret; 3600 3601 /* 3602 * Turbo will still be enabled, but won't go below the set value. 
3603 */ 3604 if (IS_VALLEYVIEW(dev)) { 3605 val = vlv_freq_opcode(dev_priv, val); 3606 3607 hw_max = valleyview_rps_max_freq(dev_priv); 3608 hw_min = valleyview_rps_min_freq(dev_priv); 3609 } else { 3610 do_div(val, GT_FREQUENCY_MULTIPLIER); 3611 3612 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 3613 hw_max = dev_priv->rps.max_freq; 3614 hw_min = (rp_state_cap >> 16) & 0xff; 3615 } 3616 3617 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { 3618 mutex_unlock(&dev_priv->rps.hw_lock); 3619 return -EINVAL; 3620 } 3621 3622 dev_priv->rps.min_freq_softlimit = val; 3623 3624 if (IS_VALLEYVIEW(dev)) 3625 valleyview_set_rps(dev, val); 3626 else 3627 gen6_set_rps(dev, val); 3628 3629 mutex_unlock(&dev_priv->rps.hw_lock); 3630 3631 return 0; 3632 } 3633 3634 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 3635 i915_min_freq_get, i915_min_freq_set, 3636 "%llu\n"); 3637 3638 static int 3639 i915_cache_sharing_get(void *data, u64 *val) 3640 { 3641 struct drm_device *dev = data; 3642 struct drm_i915_private *dev_priv = dev->dev_private; 3643 u32 snpcr; 3644 int ret; 3645 3646 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3647 return -ENODEV; 3648 3649 ret = mutex_lock_interruptible(&dev->struct_mutex); 3650 if (ret) 3651 return ret; 3652 intel_runtime_pm_get(dev_priv); 3653 3654 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 3655 3656 intel_runtime_pm_put(dev_priv); 3657 mutex_unlock(&dev_priv->dev->struct_mutex); 3658 3659 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 3660 3661 return 0; 3662 } 3663 3664 static int 3665 i915_cache_sharing_set(void *data, u64 val) 3666 { 3667 struct drm_device *dev = data; 3668 struct drm_i915_private *dev_priv = dev->dev_private; 3669 u32 snpcr; 3670 3671 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3672 return -ENODEV; 3673 3674 if (val > 3) 3675 return -EINVAL; 3676 3677 intel_runtime_pm_get(dev_priv); 3678 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 3679 3680 /* Update the cache sharing policy here as well */ 3681 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 3682 snpcr &= ~GEN6_MBC_SNPCR_MASK; 3683 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 3684 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 3685 3686 intel_runtime_pm_put(dev_priv); 3687 return 0; 3688 } 3689 3690 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 3691 i915_cache_sharing_get, i915_cache_sharing_set, 3692 "%llu\n"); 3693 3694 static int i915_forcewake_open(struct inode *inode, struct file *file) 3695 { 3696 struct drm_device *dev = inode->i_private; 3697 struct drm_i915_private *dev_priv = dev->dev_private; 3698 3699 if (INTEL_INFO(dev)->gen < 6) 3700 return 0; 3701 3702 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 3703 3704 return 0; 3705 } 3706 3707 static int i915_forcewake_release(struct inode *inode, struct file *file) 3708 { 3709 struct drm_device *dev = inode->i_private; 3710 struct drm_i915_private *dev_priv = dev->dev_private; 3711 3712 if (INTEL_INFO(dev)->gen < 6) 3713 return 0; 3714 3715 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 3716 3717 return 0; 3718 } 3719 3720 static const struct file_operations i915_forcewake_fops = { 3721 .owner = THIS_MODULE, 3722 .open = i915_forcewake_open, 3723 .release = i915_forcewake_release, 3724 }; 3725 3726 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 3727 { 3728 struct drm_device *dev = minor->dev; 3729 struct dentry *ent; 3730 3731 ent = debugfs_create_file("i915_forcewake_user", 3732 S_IRUSR, 3733 root, dev, 3734 &i915_forcewake_fops); 3735 if (!ent) 3736 return -ENOMEM; 3737 3738 return 
drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 3739 } 3740 3741 static int i915_debugfs_create(struct dentry *root, 3742 struct drm_minor *minor, 3743 const char *name, 3744 const struct file_operations *fops) 3745 { 3746 struct drm_device *dev = minor->dev; 3747 struct dentry *ent; 3748 3749 ent = debugfs_create_file(name, 3750 S_IRUGO | S_IWUSR, 3751 root, dev, 3752 fops); 3753 if (!ent) 3754 return -ENOMEM; 3755 3756 return drm_add_fake_info_node(minor, ent, fops); 3757 } 3758 3759 static const struct drm_info_list i915_debugfs_list[] = { 3760 {"i915_capabilities", i915_capabilities, 0}, 3761 {"i915_gem_objects", i915_gem_object_info, 0}, 3762 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 3763 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 3764 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 3765 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 3766 {"i915_gem_stolen", i915_gem_stolen_list_info }, 3767 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 3768 {"i915_gem_request", i915_gem_request_info, 0}, 3769 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 3770 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 3771 {"i915_gem_interrupt", i915_interrupt_info, 0}, 3772 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 3773 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 3774 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 3775 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, 3776 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 3777 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 3778 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 3779 {"i915_inttoext_table", i915_inttoext_table, 0}, 3780 {"i915_drpc_info", i915_drpc_info, 0}, 3781 {"i915_emon_status", i915_emon_status, 0}, 3782 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 3783 {"i915_gfxec", i915_gfxec, 0}, 3784 {"i915_fbc_status", i915_fbc_status, 0}, 3785 {"i915_ips_status", i915_ips_status, 0}, 3786 {"i915_sr_status", i915_sr_status, 0}, 3787 {"i915_opregion", i915_opregion, 0}, 3788 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 3789 {"i915_context_status", i915_context_status, 0}, 3790 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 3791 {"i915_swizzle_info", i915_swizzle_info, 0}, 3792 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 3793 {"i915_dpio", i915_dpio_info, 0}, 3794 {"i915_llc", i915_llc, 0}, 3795 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 3796 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 3797 {"i915_energy_uJ", i915_energy_uJ, 0}, 3798 {"i915_pc8_status", i915_pc8_status, 0}, 3799 {"i915_power_domain_info", i915_power_domain_info, 0}, 3800 {"i915_display_info", i915_display_info, 0}, 3801 }; 3802 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 3803 3804 static const struct i915_debugfs_files { 3805 const char *name; 3806 const struct file_operations *fops; 3807 } i915_debugfs_files[] = { 3808 {"i915_wedged", &i915_wedged_fops}, 3809 {"i915_max_freq", &i915_max_freq_fops}, 3810 {"i915_min_freq", &i915_min_freq_fops}, 3811 {"i915_cache_sharing", &i915_cache_sharing_fops}, 3812 {"i915_ring_stop", &i915_ring_stop_fops}, 3813 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 3814 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 3815 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 3816 {"i915_error_state", &i915_error_state_fops}, 3817 {"i915_next_seqno", &i915_next_seqno_fops}, 3818 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 3819 {"i915_pri_wm_latency", 
&i915_pri_wm_latency_fops}, 3820 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 3821 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 3822 }; 3823 3824 void intel_display_crc_init(struct drm_device *dev) 3825 { 3826 struct drm_i915_private *dev_priv = dev->dev_private; 3827 enum pipe pipe; 3828 3829 for_each_pipe(pipe) { 3830 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3831 3832 pipe_crc->opened = false; 3833 spin_lock_init(&pipe_crc->lock); 3834 init_waitqueue_head(&pipe_crc->wq); 3835 } 3836 } 3837 3838 int i915_debugfs_init(struct drm_minor *minor) 3839 { 3840 int ret, i; 3841 3842 ret = i915_forcewake_create(minor->debugfs_root, minor); 3843 if (ret) 3844 return ret; 3845 3846 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 3847 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); 3848 if (ret) 3849 return ret; 3850 } 3851 3852 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 3853 ret = i915_debugfs_create(minor->debugfs_root, minor, 3854 i915_debugfs_files[i].name, 3855 i915_debugfs_files[i].fops); 3856 if (ret) 3857 return ret; 3858 } 3859 3860 return drm_debugfs_create_files(i915_debugfs_list, 3861 I915_DEBUGFS_ENTRIES, 3862 minor->debugfs_root, minor); 3863 } 3864 3865 void i915_debugfs_cleanup(struct drm_minor *minor) 3866 { 3867 int i; 3868 3869 drm_debugfs_remove_files(i915_debugfs_list, 3870 I915_DEBUGFS_ENTRIES, minor); 3871 3872 /* The fake info nodes registered via drm_add_fake_info_node() store their key pointer (the fops or pipe_crc_info) as info_ent, so the same pointers are cast back to drm_info_list here purely for pointer matching; they are never dereferenced as real info lists. */ drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 3873 1, minor); 3874 3875 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 3876 struct drm_info_list *info_list = 3877 (struct drm_info_list *)&i915_pipe_crc_data[i]; 3878 3879 drm_debugfs_remove_files(info_list, 1, minor); 3880 } 3881 3882 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 3883 struct drm_info_list *info_list = 3884 (struct drm_info_list *) i915_debugfs_files[i].fops; 3885 3886 drm_debugfs_remove_files(info_list, 1, minor); 3887 } 3888 } 3889