/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated, we need to hook into the minor for release.
 */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}
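
/*
 * Single-line object description. The flag helpers above supply the
 * legend: 'P' user-pinned / 'p' kernel-pinned, 'X'/'Y' tiling mode,
 * 'g' for a global GTT mapping.
 */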
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
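
/* Context flags: 'I'/'i' = (un)initialized, 'R'/'r' = L3 remap pending or not. */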
static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
{
	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
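
/*
 * Accumulate the GGTT footprint of every object on @list. Relies on
 * size/count/mappable_size/mappable_count locals in the caller.
 */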
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->base.size;

	if (i915_gem_obj_ggtt_bound(obj)) {
		if (!list_empty(&obj->ring_list))
			stats->active += obj->base.size;
		else
			stats->inactive += obj->base.size;
	} else {
		if (!list_empty(&obj->global_list))
			stats->unbound += obj->base.size;
	}

	return 0;
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
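
	/* Per-client breakdown: attribute each open handle to its owner. */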
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
			   task ? task->comm : "<unknown>",
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.unbound);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}
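
/* Dump the outstanding requests on each ring: seqno @ age in jiffies. */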
static int
i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8) {
		int i;
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(i) {
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(i),
				   I915_READ(GEN8_DE_PIPE_IMR(i)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(i),
				   I915_READ(GEN8_DE_PIPE_IIR(i)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(i),
				   I915_READ(GEN8_DE_PIPE_IER(i)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
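
/* One line per fence register, with the owning object if any. */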
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
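
/*
 * Paired with i915_next_seqno_get() above; both back the writable
 * i915_next_seqno debugfs file via DEFINE_SIMPLE_ATTRIBUTE() below.
 */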
static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	crstanddelay = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}
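
/*
 * Current frequency/p-state info. The layout is per-platform: Ironlake
 * uses MEMSWCTL/MEMSTAT, gen6/7 reads the RPS registers under forcewake,
 * and Valleyview queries the Punit.
 */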
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(dev))
			reqf >>= 24;
		else
			reqf >>= 25;
		reqf *= GT_FREQUENCY_MULTIPLIER;

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts, val;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		val = valleyview_rps_max_freq(dev_priv);
		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, val));

		val = valleyview_rps_min_freq(dev_priv);
		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, val));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
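
/*
 * Ironlake render-standby (RSx) state, decoded from MEMMODECTL,
 * RSTDBYCTL and CRSTANDVID.
 */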
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1;
	unsigned fw_rendercount = 0, fw_mediacount = 0;

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (I915_READ(VLV_GTLC_PW_STATUS) &
		    VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (I915_READ(VLV_GTLC_PW_STATUS) &
		    VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
"Up" : "Down"); 1211 1212 spin_lock_irq(&dev_priv->uncore.lock); 1213 fw_rendercount = dev_priv->uncore.fw_rendercount; 1214 fw_mediacount = dev_priv->uncore.fw_mediacount; 1215 spin_unlock_irq(&dev_priv->uncore.lock); 1216 1217 seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount); 1218 seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount); 1219 1220 1221 return 0; 1222 } 1223 1224 1225 static int gen6_drpc_info(struct seq_file *m) 1226 { 1227 1228 struct drm_info_node *node = (struct drm_info_node *) m->private; 1229 struct drm_device *dev = node->minor->dev; 1230 struct drm_i915_private *dev_priv = dev->dev_private; 1231 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1232 unsigned forcewake_count; 1233 int count = 0, ret; 1234 1235 ret = mutex_lock_interruptible(&dev->struct_mutex); 1236 if (ret) 1237 return ret; 1238 intel_runtime_pm_get(dev_priv); 1239 1240 spin_lock_irq(&dev_priv->uncore.lock); 1241 forcewake_count = dev_priv->uncore.forcewake_count; 1242 spin_unlock_irq(&dev_priv->uncore.lock); 1243 1244 if (forcewake_count) { 1245 seq_puts(m, "RC information inaccurate because somebody " 1246 "holds a forcewake reference \n"); 1247 } else { 1248 /* NB: we cannot use forcewake, else we read the wrong values */ 1249 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1250 udelay(10); 1251 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1252 } 1253 1254 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1255 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1256 1257 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1258 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1259 mutex_unlock(&dev->struct_mutex); 1260 mutex_lock(&dev_priv->rps.hw_lock); 1261 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1262 mutex_unlock(&dev_priv->rps.hw_lock); 1263 1264 intel_runtime_pm_put(dev_priv); 1265 1266 seq_printf(m, "Video Turbo Mode: %s\n", 1267 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1268 seq_printf(m, "HW control enabled: %s\n", 1269 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1270 seq_printf(m, "SW control enabled: %s\n", 1271 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1272 GEN6_RP_MEDIA_SW_MODE)); 1273 seq_printf(m, "RC1e Enabled: %s\n", 1274 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1275 seq_printf(m, "RC6 Enabled: %s\n", 1276 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1277 seq_printf(m, "Deep RC6 Enabled: %s\n", 1278 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1279 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1280 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1281 seq_puts(m, "Current RC state: "); 1282 switch (gt_core_status & GEN6_RCn_MASK) { 1283 case GEN6_RC0: 1284 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1285 seq_puts(m, "Core Power Down\n"); 1286 else 1287 seq_puts(m, "on\n"); 1288 break; 1289 case GEN6_RC3: 1290 seq_puts(m, "RC3\n"); 1291 break; 1292 case GEN6_RC6: 1293 seq_puts(m, "RC6\n"); 1294 break; 1295 case GEN6_RC7: 1296 seq_puts(m, "RC7\n"); 1297 break; 1298 default: 1299 seq_puts(m, "Unknown\n"); 1300 break; 1301 } 1302 1303 seq_printf(m, "Core Power Down: %s\n", 1304 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1305 1306 /* Not exactly sure what this is */ 1307 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1308 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1309 seq_printf(m, "RC6 residency since boot: %u\n", 1310 I915_READ(GEN6_GT_GFX_RC6)); 1311 seq_printf(m, "RC6+ residency since boot: %u\n", 1312 I915_READ(GEN6_GT_GFX_RC6p)); 1313 seq_printf(m, "RC6++ residency since 
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6 voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev))
		return vlv_drpc_info(m);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}
	return 0;
}

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
		seq_puts(m, "enabled\n");
	else
		seq_puts(m, "disabled\n");

	return 0;
}
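
/* Self-refresh status; the enable bit lives in a different register per platform. */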
"self-refresh: %s\n", 1431 sr_enabled ? "enabled" : "disabled"); 1432 1433 return 0; 1434 } 1435 1436 static int i915_emon_status(struct seq_file *m, void *unused) 1437 { 1438 struct drm_info_node *node = (struct drm_info_node *) m->private; 1439 struct drm_device *dev = node->minor->dev; 1440 drm_i915_private_t *dev_priv = dev->dev_private; 1441 unsigned long temp, chipset, gfx; 1442 int ret; 1443 1444 if (!IS_GEN5(dev)) 1445 return -ENODEV; 1446 1447 ret = mutex_lock_interruptible(&dev->struct_mutex); 1448 if (ret) 1449 return ret; 1450 1451 temp = i915_mch_val(dev_priv); 1452 chipset = i915_chipset_val(dev_priv); 1453 gfx = i915_gfx_val(dev_priv); 1454 mutex_unlock(&dev->struct_mutex); 1455 1456 seq_printf(m, "GMCH temp: %ld\n", temp); 1457 seq_printf(m, "Chipset power: %ld\n", chipset); 1458 seq_printf(m, "GFX power: %ld\n", gfx); 1459 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1460 1461 return 0; 1462 } 1463 1464 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1465 { 1466 struct drm_info_node *node = (struct drm_info_node *) m->private; 1467 struct drm_device *dev = node->minor->dev; 1468 drm_i915_private_t *dev_priv = dev->dev_private; 1469 int ret; 1470 int gpu_freq, ia_freq; 1471 1472 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1473 seq_puts(m, "unsupported on this chipset\n"); 1474 return 0; 1475 } 1476 1477 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1478 1479 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1480 if (ret) 1481 return ret; 1482 intel_runtime_pm_get(dev_priv); 1483 1484 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1485 1486 for (gpu_freq = dev_priv->rps.min_delay; 1487 gpu_freq <= dev_priv->rps.max_delay; 1488 gpu_freq++) { 1489 ia_freq = gpu_freq; 1490 sandybridge_pcode_read(dev_priv, 1491 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1492 &ia_freq); 1493 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1494 gpu_freq * GT_FREQUENCY_MULTIPLIER, 1495 ((ia_freq >> 0) & 0xff) * 100, 1496 ((ia_freq >> 8) & 0xff) * 100); 1497 } 1498 1499 intel_runtime_pm_put(dev_priv); 1500 mutex_unlock(&dev_priv->rps.hw_lock); 1501 1502 return 0; 1503 } 1504 1505 static int i915_gfxec(struct seq_file *m, void *unused) 1506 { 1507 struct drm_info_node *node = (struct drm_info_node *) m->private; 1508 struct drm_device *dev = node->minor->dev; 1509 drm_i915_private_t *dev_priv = dev->dev_private; 1510 int ret; 1511 1512 ret = mutex_lock_interruptible(&dev->struct_mutex); 1513 if (ret) 1514 return ret; 1515 intel_runtime_pm_get(dev_priv); 1516 1517 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1518 intel_runtime_pm_put(dev_priv); 1519 1520 mutex_unlock(&dev->struct_mutex); 1521 1522 return 0; 1523 } 1524 1525 static int i915_opregion(struct seq_file *m, void *unused) 1526 { 1527 struct drm_info_node *node = (struct drm_info_node *) m->private; 1528 struct drm_device *dev = node->minor->dev; 1529 drm_i915_private_t *dev_priv = dev->dev_private; 1530 struct intel_opregion *opregion = &dev_priv->opregion; 1531 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); 1532 int ret; 1533 1534 if (data == NULL) 1535 return -ENOMEM; 1536 1537 ret = mutex_lock_interruptible(&dev->struct_mutex); 1538 if (ret) 1539 goto out; 1540 1541 if (opregion->header) { 1542 memcpy_fromio(data, opregion->header, OPREGION_SIZE); 1543 seq_write(m, data, OPREGION_SIZE); 1544 } 1545 1546 mutex_unlock(&dev->struct_mutex); 1547 1548 out: 1549 kfree(data); 1550 return 0; 1551 } 1552 1553 static int i915_gem_framebuffer_info(struct seq_file *m, void 
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_fbdev *ifbdev = NULL;
	struct intel_framebuffer *fb;

#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
	mutex_unlock(&dev->mode_config.mutex);
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (ifbdev && &fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct i915_hw_context *ctx;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i)
			if (ring->default_context == ctx)
				seq_printf(m, "(default context %s) ", ring->name);

		describe_obj(m, ctx->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;

	spin_lock_irq(&dev_priv->uncore.lock);
	if (IS_VALLEYVIEW(dev)) {
		fw_rendercount = dev_priv->uncore.fw_rendercount;
		fw_mediacount = dev_priv->uncore.fw_mediacount;
	} else
		forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
		seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
	} else
		seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
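
/*
 * Report the bit-6 swizzle mode used for X/Y tiling, plus the DRAM
 * configuration registers it was derived from.
 */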
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (IS_GEN8(dev))
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int unused, i;

	if (!ppgtt)
		return;

	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
	seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
	for_each_ring(ring, dev_priv, unused) {
		seq_printf(m, "%s\n", ring->name);
		for (i = 0; i < 4; i++) {
			u32 offset = 0x270 + i * 8;
			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
			pdp <<= 32;
			pdp |= I915_READ(ring->mmio_base + offset);
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
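
/* Gen6/7 per-ring page-directory state, plus the aliasing PPGTT if present. */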
"PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 1777 } 1778 if (dev_priv->mm.aliasing_ppgtt) { 1779 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1780 1781 seq_puts(m, "aliasing PPGTT:\n"); 1782 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1783 } 1784 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1785 } 1786 1787 static int i915_ppgtt_info(struct seq_file *m, void *data) 1788 { 1789 struct drm_info_node *node = (struct drm_info_node *) m->private; 1790 struct drm_device *dev = node->minor->dev; 1791 struct drm_i915_private *dev_priv = dev->dev_private; 1792 1793 int ret = mutex_lock_interruptible(&dev->struct_mutex); 1794 if (ret) 1795 return ret; 1796 intel_runtime_pm_get(dev_priv); 1797 1798 if (INTEL_INFO(dev)->gen >= 8) 1799 gen8_ppgtt_info(m, dev); 1800 else if (INTEL_INFO(dev)->gen >= 6) 1801 gen6_ppgtt_info(m, dev); 1802 1803 intel_runtime_pm_put(dev_priv); 1804 mutex_unlock(&dev->struct_mutex); 1805 1806 return 0; 1807 } 1808 1809 static int i915_dpio_info(struct seq_file *m, void *data) 1810 { 1811 struct drm_info_node *node = (struct drm_info_node *) m->private; 1812 struct drm_device *dev = node->minor->dev; 1813 struct drm_i915_private *dev_priv = dev->dev_private; 1814 int ret; 1815 1816 1817 if (!IS_VALLEYVIEW(dev)) { 1818 seq_puts(m, "unsupported\n"); 1819 return 0; 1820 } 1821 1822 ret = mutex_lock_interruptible(&dev_priv->dpio_lock); 1823 if (ret) 1824 return ret; 1825 1826 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); 1827 1828 seq_printf(m, "DPIO PLL DW3 CH0 : 0x%08x\n", 1829 vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0))); 1830 seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n", 1831 vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1))); 1832 1833 seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n", 1834 vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0))); 1835 seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n", 1836 vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1))); 1837 1838 seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n", 1839 vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0))); 1840 seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n", 1841 vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1))); 1842 1843 seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n", 1844 vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0))); 1845 seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n", 1846 vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1))); 1847 1848 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1849 vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0)); 1850 1851 mutex_unlock(&dev_priv->dpio_lock); 1852 1853 return 0; 1854 } 1855 1856 static int i915_llc(struct seq_file *m, void *data) 1857 { 1858 struct drm_info_node *node = (struct drm_info_node *) m->private; 1859 struct drm_device *dev = node->minor->dev; 1860 struct drm_i915_private *dev_priv = dev->dev_private; 1861 1862 /* Size calculation for LLC is a bit of a pain. Ignore for now. 
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrperf = 0;
	bool enabled = false;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));

	enabled = HAS_PSR(dev) &&
		  I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
	seq_printf(m, "Enabled: %s\n", yesno(enabled));

	if (HAS_PSR(dev))
		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
			  EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance_Counter: %u\n", psrperf);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}

static int i915_pc8_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_HASWELL(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	mutex_lock(&dev_priv->pc8.lock);
	seq_printf(m, "Requirements met: %s\n",
		   yesno(dev_priv->pc8.requirements_met));
	seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
	seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(dev_priv->pc8.irqs_disabled));
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
	mutex_unlock(&dev_priv->pc8.lock);

	return 0;
}

static const char *power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_INIT:
		return "INIT";
	default:
		WARN_ON(1);
		return "?";
	}
}
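/*
 * Note: power_domain_str() has to cover every value of
 * enum intel_display_power_domain; the WARN_ON in its default case is
 * there to catch domains added without a matching string.
 */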
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
		     power_domain++) {
			if (!(BIT(power_domain) & power_well->domains))
				continue;

			seq_printf(m, "  %-23s %d\n",
				   power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
		}
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}

struct pipe_crc_info {
	const char *name;
	struct drm_device *dev;
	enum pipe pipe;
};

static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
/* account for the terminating '\0' */
#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)

static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}
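/*
 * The CRC entries form a ring buffer indexed by head/tail, using the
 * CIRC_* helpers from <linux/circ_buf.h>. INTEL_PIPE_CRC_ENTRIES_NR
 * must be a power of two for the tail masking in i915_pipe_crc_read()
 * below to work (enforced there with BUILD_BUG_ON_NOT_POWER_OF_2).
 */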
static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int head, tail, n_entries, n;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	head = pipe_crc->head;
	tail = pipe_crc->tail;
	n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
			count / PIPE_CRC_LINE_LEN);
	spin_unlock_irq(&pipe_crc->lock);

	bytes_read = 0;
	n = 0;
	do {
		struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
		int ret;

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);

		ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
				   buf, PIPE_CRC_LINE_LEN);
		if (ret == PIPE_CRC_LINE_LEN)
			return -EFAULT;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		n++;
	} while (--n_entries);

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->tail = tail;
	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}

static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};

static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};

static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev = dev;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, info);
}

static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
	return pipe_crc_sources[source];
}

static int display_crc_ctl_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		seq_printf(m, "%c %s\n", pipe_name(i),
			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

	return 0;
}

static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, display_crc_ctl_show, dev);
}
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_EDP:
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexisting DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		}
	}
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}

static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}

static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}

static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}

static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(&source, &val);

	if (ret != 0)
		return ret;

	/* none -> real source transition */
	if (source) {
		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
					    INTEL_PIPE_CRC_ENTRIES_NR,
					    GFP_KERNEL);
		if (!pipe_crc->entries)
			return -ENOMEM;

		spin_lock_irq(&pipe_crc->lock);
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		intel_wait_for_vblank(dev, pipe);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
	}

	return 0;
}

/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}

enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};

static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}

static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}
static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}

static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}

static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");
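/*
 * The DEFINE_SIMPLE_ATTRIBUTE() files above and below are writable
 * knobs registered via i915_debugfs_files[] at the end of this file.
 * For example (path illustrative only; writing to i915_wedged injects
 * a GPU error via i915_handle_error()):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */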
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");

#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	struct i915_address_space *vm;
	struct i915_vma *vma, *x;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			list_for_each_entry_safe(vma, x, &vm->inactive_list,
						 mm_list) {
				if (vma->obj->pin_count)
					continue;

				ret = i915_vma_unbind(vma);
				if (ret)
					goto unlock;
			}
		}
	}

	if (val & DROP_UNBOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
					 global_list)
			if (obj->pages_pin_count == 0) {
				ret = i915_gem_object_put_pages(obj);
				if (ret)
					goto unlock;
			}
	}

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
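/*
 * Reading i915_gem_drop_caches reports the full mask (DROP_ALL, 0xf);
 * writing a mask drops the selected caches, e.g. (path illustrative):
 *
 *   echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */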
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
	else
		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);
		dev_priv->rps.max_delay = val;
		valleyview_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.max_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");

static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
	else
		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);
		dev_priv->rps.min_delay = val;
		valleyview_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.min_delay = val;
		gen6_set_rps(dev, val);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
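/*
 * i915_forcewake_user below implements a hold-while-open reference:
 * opening the file grabs forcewake (and a runtime PM reference) on
 * gen6+, and both are dropped again on release. Keeping the file open
 * from userspace therefore keeps the GT awake for register access.
 */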
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_runtime_pm_get(dev_priv);
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_pc8_status", i915_pc8_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
};

void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}