/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->pin_display)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
"g" : " "; 113 } 114 115 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) 116 { 117 u64 size = 0; 118 struct i915_vma *vma; 119 120 list_for_each_entry(vma, &obj->vma_list, vma_link) { 121 if (i915_is_ggtt(vma->vm) && 122 drm_mm_node_allocated(&vma->node)) 123 size += vma->node.size; 124 } 125 126 return size; 127 } 128 129 static void 130 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 131 { 132 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 133 struct intel_engine_cs *ring; 134 struct i915_vma *vma; 135 int pin_count = 0; 136 int i; 137 138 seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ", 139 &obj->base, 140 obj->active ? "*" : " ", 141 get_pin_flag(obj), 142 get_tiling_flag(obj), 143 get_global_flag(obj), 144 obj->base.size / 1024, 145 obj->base.read_domains, 146 obj->base.write_domain); 147 for_each_ring(ring, dev_priv, i) 148 seq_printf(m, "%x ", 149 i915_gem_request_get_seqno(obj->last_read_req[i])); 150 seq_printf(m, "] %x %x%s%s%s", 151 i915_gem_request_get_seqno(obj->last_write_req), 152 i915_gem_request_get_seqno(obj->last_fenced_req), 153 i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level), 154 obj->dirty ? " dirty" : "", 155 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 156 if (obj->base.name) 157 seq_printf(m, " (name: %d)", obj->base.name); 158 list_for_each_entry(vma, &obj->vma_list, vma_link) { 159 if (vma->pin_count > 0) 160 pin_count++; 161 } 162 seq_printf(m, " (pinned x %d)", pin_count); 163 if (obj->pin_display) 164 seq_printf(m, " (display)"); 165 if (obj->fence_reg != I915_FENCE_REG_NONE) 166 seq_printf(m, " (fence: %d)", obj->fence_reg); 167 list_for_each_entry(vma, &obj->vma_list, vma_link) { 168 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx", 169 i915_is_ggtt(vma->vm) ? "g" : "pp", 170 vma->node.start, vma->node.size); 171 if (i915_is_ggtt(vma->vm)) 172 seq_printf(m, ", type: %u)", vma->ggtt_view.type); 173 else 174 seq_puts(m, ")"); 175 } 176 if (obj->stolen) 177 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 178 if (obj->pin_display || obj->fault_mappable) { 179 char s[3], *t = s; 180 if (obj->pin_display) 181 *t++ = 'p'; 182 if (obj->fault_mappable) 183 *t++ = 'f'; 184 *t = '\0'; 185 seq_printf(m, " (%s mappable)", s); 186 } 187 if (obj->last_write_req != NULL) 188 seq_printf(m, " (%s)", 189 i915_gem_request_get_ring(obj->last_write_req)->name); 190 if (obj->frontbuffer_bits) 191 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); 192 } 193 194 static void describe_ctx(struct seq_file *m, struct intel_context *ctx) 195 { 196 seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i'); 197 seq_putc(m, ctx->remap_slice ? 
	seq_putc(m, ' ');
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_total_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, vma_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_is_ggtt(vma->vm)) {
				stats->global += obj->base.size;
				continue;
			}

			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->file_priv != stats->file_priv)
				continue;

			if (obj->active) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->active)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *ring;
	int i, j;

	memset(&stats, 0, sizeof(stats));

	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &ring->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_total_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)

static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	u64 size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_display) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
		   count, size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   dev_priv->gtt.base.total,
		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irq(&dev->event_lock);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 addr;

			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *ring =
					i915_gem_request_get_ring(work->flip_queued_req);

				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
					   ring->name,
					   i915_gem_request_get_seqno(work->flip_queued_req),
					   dev_priv->next_seqno,
					   ring->get_seqno(ring, true),
					   i915_gem_request_completed(work->flip_queued_req, true));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   drm_crtc_vblank_count(&crtc->base));
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_INFO(dev)->gen >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *ring;
	int total = 0;
	int ret, i, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &ring->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   ring->name, j, count);

			list_for_each_entry(obj,
					    &ring->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, " ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_i915_gem_request *req;
	int ret, any, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_ring(ring, dev_priv, i) {
		int count;

		count = 0;
		list_for_each_entry(req, &ring->request_list, list)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", ring->name, count);
		list_for_each_entry(req, &ring->request_list, list) {
			struct task_struct *task;

			rcu_read_lock();
			task = NULL;
			if (req->pid)
				task = pid_task(req->pid, PIDTYPE_PID);
			seq_printf(m, " %x @ %d: %s [%d]\n",
				   req->seqno,
				   (int) (jiffies - req->emitted_jiffies),
				   task ? task->comm : "<unknown>",
				   task ? task->pid : -1);
			rcu_read_unlock();
		}

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %x\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}


static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			if (!intel_display_power_is_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe))) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

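/*
 * Dump the fence registers: the total number available, and for each
 * register its pin count and the object currently occupying it (or
 * "unused" if it is free).
 */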
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");

static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_BROXTON(dev)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev))
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_GEN9(dev))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u64 acthd[I915_NUM_RINGS];
	u32 seqno[I915_NUM_RINGS];
	u32 instdone[I915_NUM_INSTDONE_REG];
	int i, j;

	if (!i915.enable_hangcheck) {
		seq_printf(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i) {
		seqno[i] = ring->get_seqno(ring, false);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	i915_get_extra_instdone(dev, instdone);

	intel_runtime_pm_put(dev_priv);

	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
		seq_printf(m, "Hangcheck active, fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	} else
		seq_printf(m, "Hangcheck inactive\n");

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s:\n", ring->name);
		seq_printf(m, "\tseqno = %x [current %x]\n",
			   ring->hangcheck.seqno, seqno[i]);
		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)ring->hangcheck.acthd,
			   (long long)acthd[i]);
		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
			   (long long)ring->hangcheck.max_acthd);
		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);

		if (ring->id == RCS) {
			seq_puts(m, "\tinstdone read =");

			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
				seq_printf(m, " 0x%08x", instdone[j]);

			seq_puts(m, "\n\tinstdone accu =");

			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
				seq_printf(m, " 0x%08x",
					   ring->hangcheck.instdone[j]);

			seq_puts(m, "\n");
		}
	}

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_uncore_forcewake_domain *fw_domain;
	int i;

	spin_lock_irq(&dev_priv->uncore.lock);
	for_each_fw_domain(fw_domain, dev_priv, i) {
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(i),
			   fw_domain->wake_count);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);

	return 0;
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1, pw_status;

	intel_runtime_pm_get(dev_priv);

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
"Core Power Down: %s\n", 1596 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1597 1598 /* Not exactly sure what this is */ 1599 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1600 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1601 seq_printf(m, "RC6 residency since boot: %u\n", 1602 I915_READ(GEN6_GT_GFX_RC6)); 1603 seq_printf(m, "RC6+ residency since boot: %u\n", 1604 I915_READ(GEN6_GT_GFX_RC6p)); 1605 seq_printf(m, "RC6++ residency since boot: %u\n", 1606 I915_READ(GEN6_GT_GFX_RC6pp)); 1607 1608 seq_printf(m, "RC6 voltage: %dmV\n", 1609 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1610 seq_printf(m, "RC6+ voltage: %dmV\n", 1611 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1612 seq_printf(m, "RC6++ voltage: %dmV\n", 1613 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1614 return 0; 1615 } 1616 1617 static int i915_drpc_info(struct seq_file *m, void *unused) 1618 { 1619 struct drm_info_node *node = m->private; 1620 struct drm_device *dev = node->minor->dev; 1621 1622 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 1623 return vlv_drpc_info(m); 1624 else if (INTEL_INFO(dev)->gen >= 6) 1625 return gen6_drpc_info(m); 1626 else 1627 return ironlake_drpc_info(m); 1628 } 1629 1630 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) 1631 { 1632 struct drm_info_node *node = m->private; 1633 struct drm_device *dev = node->minor->dev; 1634 struct drm_i915_private *dev_priv = dev->dev_private; 1635 1636 seq_printf(m, "FB tracking busy bits: 0x%08x\n", 1637 dev_priv->fb_tracking.busy_bits); 1638 1639 seq_printf(m, "FB tracking flip bits: 0x%08x\n", 1640 dev_priv->fb_tracking.flip_bits); 1641 1642 return 0; 1643 } 1644 1645 static int i915_fbc_status(struct seq_file *m, void *unused) 1646 { 1647 struct drm_info_node *node = m->private; 1648 struct drm_device *dev = node->minor->dev; 1649 struct drm_i915_private *dev_priv = dev->dev_private; 1650 1651 if (!HAS_FBC(dev)) { 1652 seq_puts(m, "FBC unsupported on this chipset\n"); 1653 return 0; 1654 } 1655 1656 intel_runtime_pm_get(dev_priv); 1657 mutex_lock(&dev_priv->fbc.lock); 1658 1659 if (intel_fbc_is_active(dev_priv)) 1660 seq_puts(m, "FBC enabled\n"); 1661 else 1662 seq_printf(m, "FBC disabled: %s\n", 1663 dev_priv->fbc.no_fbc_reason); 1664 1665 if (INTEL_INFO(dev_priv)->gen >= 7) 1666 seq_printf(m, "Compressing: %s\n", 1667 yesno(I915_READ(FBC_STATUS2) & 1668 FBC_COMPRESSION_MASK)); 1669 1670 mutex_unlock(&dev_priv->fbc.lock); 1671 intel_runtime_pm_put(dev_priv); 1672 1673 return 0; 1674 } 1675 1676 static int i915_fbc_fc_get(void *data, u64 *val) 1677 { 1678 struct drm_device *dev = data; 1679 struct drm_i915_private *dev_priv = dev->dev_private; 1680 1681 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) 1682 return -ENODEV; 1683 1684 *val = dev_priv->fbc.false_color; 1685 1686 return 0; 1687 } 1688 1689 static int i915_fbc_fc_set(void *data, u64 val) 1690 { 1691 struct drm_device *dev = data; 1692 struct drm_i915_private *dev_priv = dev->dev_private; 1693 u32 reg; 1694 1695 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) 1696 return -ENODEV; 1697 1698 mutex_lock(&dev_priv->fbc.lock); 1699 1700 reg = I915_READ(ILK_DPFC_CONTROL); 1701 dev_priv->fbc.false_color = val; 1702 1703 I915_WRITE(ILK_DPFC_CONTROL, val ? 
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
			i915_fbc_fc_get, i915_fbc_fc_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
		 IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
"enabled" : "disabled"); 1770 1771 return 0; 1772 } 1773 1774 static int i915_emon_status(struct seq_file *m, void *unused) 1775 { 1776 struct drm_info_node *node = m->private; 1777 struct drm_device *dev = node->minor->dev; 1778 struct drm_i915_private *dev_priv = dev->dev_private; 1779 unsigned long temp, chipset, gfx; 1780 int ret; 1781 1782 if (!IS_GEN5(dev)) 1783 return -ENODEV; 1784 1785 ret = mutex_lock_interruptible(&dev->struct_mutex); 1786 if (ret) 1787 return ret; 1788 1789 temp = i915_mch_val(dev_priv); 1790 chipset = i915_chipset_val(dev_priv); 1791 gfx = i915_gfx_val(dev_priv); 1792 mutex_unlock(&dev->struct_mutex); 1793 1794 seq_printf(m, "GMCH temp: %ld\n", temp); 1795 seq_printf(m, "Chipset power: %ld\n", chipset); 1796 seq_printf(m, "GFX power: %ld\n", gfx); 1797 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1798 1799 return 0; 1800 } 1801 1802 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1803 { 1804 struct drm_info_node *node = m->private; 1805 struct drm_device *dev = node->minor->dev; 1806 struct drm_i915_private *dev_priv = dev->dev_private; 1807 int ret = 0; 1808 int gpu_freq, ia_freq; 1809 unsigned int max_gpu_freq, min_gpu_freq; 1810 1811 if (!HAS_CORE_RING_FREQ(dev)) { 1812 seq_puts(m, "unsupported on this chipset\n"); 1813 return 0; 1814 } 1815 1816 intel_runtime_pm_get(dev_priv); 1817 1818 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1819 1820 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1821 if (ret) 1822 goto out; 1823 1824 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 1825 /* Convert GT frequency to 50 HZ units */ 1826 min_gpu_freq = 1827 dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER; 1828 max_gpu_freq = 1829 dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER; 1830 } else { 1831 min_gpu_freq = dev_priv->rps.min_freq_softlimit; 1832 max_gpu_freq = dev_priv->rps.max_freq_softlimit; 1833 } 1834 1835 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1836 1837 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { 1838 ia_freq = gpu_freq; 1839 sandybridge_pcode_read(dev_priv, 1840 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1841 &ia_freq); 1842 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1843 intel_gpu_freq(dev_priv, (gpu_freq * 1844 (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? 
1845 GEN9_FREQ_SCALER : 1))), 1846 ((ia_freq >> 0) & 0xff) * 100, 1847 ((ia_freq >> 8) & 0xff) * 100); 1848 } 1849 1850 mutex_unlock(&dev_priv->rps.hw_lock); 1851 1852 out: 1853 intel_runtime_pm_put(dev_priv); 1854 return ret; 1855 } 1856 1857 static int i915_opregion(struct seq_file *m, void *unused) 1858 { 1859 struct drm_info_node *node = m->private; 1860 struct drm_device *dev = node->minor->dev; 1861 struct drm_i915_private *dev_priv = dev->dev_private; 1862 struct intel_opregion *opregion = &dev_priv->opregion; 1863 int ret; 1864 1865 ret = mutex_lock_interruptible(&dev->struct_mutex); 1866 if (ret) 1867 goto out; 1868 1869 if (opregion->header) 1870 seq_write(m, opregion->header, OPREGION_SIZE); 1871 1872 mutex_unlock(&dev->struct_mutex); 1873 1874 out: 1875 return 0; 1876 } 1877 1878 static int i915_vbt(struct seq_file *m, void *unused) 1879 { 1880 struct drm_info_node *node = m->private; 1881 struct drm_device *dev = node->minor->dev; 1882 struct drm_i915_private *dev_priv = dev->dev_private; 1883 struct intel_opregion *opregion = &dev_priv->opregion; 1884 1885 if (opregion->vbt) 1886 seq_write(m, opregion->vbt, opregion->vbt_size); 1887 1888 return 0; 1889 } 1890 1891 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1892 { 1893 struct drm_info_node *node = m->private; 1894 struct drm_device *dev = node->minor->dev; 1895 struct intel_framebuffer *fbdev_fb = NULL; 1896 struct drm_framebuffer *drm_fb; 1897 1898 #ifdef CONFIG_DRM_FBDEV_EMULATION 1899 if (to_i915(dev)->fbdev) { 1900 fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb); 1901 1902 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1903 fbdev_fb->base.width, 1904 fbdev_fb->base.height, 1905 fbdev_fb->base.depth, 1906 fbdev_fb->base.bits_per_pixel, 1907 fbdev_fb->base.modifier[0], 1908 atomic_read(&fbdev_fb->base.refcount.refcount)); 1909 describe_obj(m, fbdev_fb->obj); 1910 seq_putc(m, '\n'); 1911 } 1912 #endif 1913 1914 mutex_lock(&dev->mode_config.fb_lock); 1915 drm_for_each_fb(drm_fb, dev) { 1916 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 1917 if (fb == fbdev_fb) 1918 continue; 1919 1920 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1921 fb->base.width, 1922 fb->base.height, 1923 fb->base.depth, 1924 fb->base.bits_per_pixel, 1925 fb->base.modifier[0], 1926 atomic_read(&fb->base.refcount.refcount)); 1927 describe_obj(m, fb->obj); 1928 seq_putc(m, '\n'); 1929 } 1930 mutex_unlock(&dev->mode_config.fb_lock); 1931 1932 return 0; 1933 } 1934 1935 static void describe_ctx_ringbuf(struct seq_file *m, 1936 struct intel_ringbuffer *ringbuf) 1937 { 1938 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)", 1939 ringbuf->space, ringbuf->head, ringbuf->tail, 1940 ringbuf->last_retired_head); 1941 } 1942 1943 static int i915_context_status(struct seq_file *m, void *unused) 1944 { 1945 struct drm_info_node *node = m->private; 1946 struct drm_device *dev = node->minor->dev; 1947 struct drm_i915_private *dev_priv = dev->dev_private; 1948 struct intel_engine_cs *ring; 1949 struct intel_context *ctx; 1950 int ret, i; 1951 1952 ret = mutex_lock_interruptible(&dev->struct_mutex); 1953 if (ret) 1954 return ret; 1955 1956 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1957 if (!i915.enable_execlists && 1958 ctx->legacy_hw_ctx.rcs_state == NULL) 1959 continue; 1960 1961 seq_puts(m, "HW context "); 1962 describe_ctx(m, ctx); 1963 if (ctx == dev_priv->kernel_context) 1964 
seq_printf(m, "(kernel context) "); 1965 1966 if (i915.enable_execlists) { 1967 seq_putc(m, '\n'); 1968 for_each_ring(ring, dev_priv, i) { 1969 struct drm_i915_gem_object *ctx_obj = 1970 ctx->engine[i].state; 1971 struct intel_ringbuffer *ringbuf = 1972 ctx->engine[i].ringbuf; 1973 1974 seq_printf(m, "%s: ", ring->name); 1975 if (ctx_obj) 1976 describe_obj(m, ctx_obj); 1977 if (ringbuf) 1978 describe_ctx_ringbuf(m, ringbuf); 1979 seq_putc(m, '\n'); 1980 } 1981 } else { 1982 describe_obj(m, ctx->legacy_hw_ctx.rcs_state); 1983 } 1984 1985 seq_putc(m, '\n'); 1986 } 1987 1988 mutex_unlock(&dev->struct_mutex); 1989 1990 return 0; 1991 } 1992 1993 static void i915_dump_lrc_obj(struct seq_file *m, 1994 struct intel_context *ctx, 1995 struct intel_engine_cs *ring) 1996 { 1997 struct page *page; 1998 uint32_t *reg_state; 1999 int j; 2000 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; 2001 unsigned long ggtt_offset = 0; 2002 2003 if (ctx_obj == NULL) { 2004 seq_printf(m, "Context on %s with no gem object\n", 2005 ring->name); 2006 return; 2007 } 2008 2009 seq_printf(m, "CONTEXT: %s %u\n", ring->name, 2010 intel_execlists_ctx_id(ctx, ring)); 2011 2012 if (!i915_gem_obj_ggtt_bound(ctx_obj)) 2013 seq_puts(m, "\tNot bound in GGTT\n"); 2014 else 2015 ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj); 2016 2017 if (i915_gem_object_get_pages(ctx_obj)) { 2018 seq_puts(m, "\tFailed to get pages for context object\n"); 2019 return; 2020 } 2021 2022 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); 2023 if (!WARN_ON(page == NULL)) { 2024 reg_state = kmap_atomic(page); 2025 2026 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { 2027 seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", 2028 ggtt_offset + 4096 + (j * 4), 2029 reg_state[j], reg_state[j + 1], 2030 reg_state[j + 2], reg_state[j + 3]); 2031 } 2032 kunmap_atomic(reg_state); 2033 } 2034 2035 seq_putc(m, '\n'); 2036 } 2037 2038 static int i915_dump_lrc(struct seq_file *m, void *unused) 2039 { 2040 struct drm_info_node *node = (struct drm_info_node *) m->private; 2041 struct drm_device *dev = node->minor->dev; 2042 struct drm_i915_private *dev_priv = dev->dev_private; 2043 struct intel_engine_cs *ring; 2044 struct intel_context *ctx; 2045 int ret, i; 2046 2047 if (!i915.enable_execlists) { 2048 seq_printf(m, "Logical Ring Contexts are disabled\n"); 2049 return 0; 2050 } 2051 2052 ret = mutex_lock_interruptible(&dev->struct_mutex); 2053 if (ret) 2054 return ret; 2055 2056 list_for_each_entry(ctx, &dev_priv->context_list, link) 2057 if (ctx != dev_priv->kernel_context) 2058 for_each_ring(ring, dev_priv, i) 2059 i915_dump_lrc_obj(m, ctx, ring); 2060 2061 mutex_unlock(&dev->struct_mutex); 2062 2063 return 0; 2064 } 2065 2066 static int i915_execlists(struct seq_file *m, void *data) 2067 { 2068 struct drm_info_node *node = (struct drm_info_node *)m->private; 2069 struct drm_device *dev = node->minor->dev; 2070 struct drm_i915_private *dev_priv = dev->dev_private; 2071 struct intel_engine_cs *ring; 2072 u32 status_pointer; 2073 u8 read_pointer; 2074 u8 write_pointer; 2075 u32 status; 2076 u32 ctx_id; 2077 struct list_head *cursor; 2078 int ring_id, i; 2079 int ret; 2080 2081 if (!i915.enable_execlists) { 2082 seq_puts(m, "Logical Ring Contexts are disabled\n"); 2083 return 0; 2084 } 2085 2086 ret = mutex_lock_interruptible(&dev->struct_mutex); 2087 if (ret) 2088 return ret; 2089 2090 intel_runtime_pm_get(dev_priv); 2091 2092 for_each_ring(ring, dev_priv, ring_id) { 2093 struct drm_i915_gem_request *head_req = NULL; 2094 int count 
= 0; 2095 unsigned long flags; 2096 2097 seq_printf(m, "%s\n", ring->name); 2098 2099 status = I915_READ(RING_EXECLIST_STATUS_LO(ring)); 2100 ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring)); 2101 seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n", 2102 status, ctx_id); 2103 2104 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); 2105 seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer); 2106 2107 read_pointer = ring->next_context_status_buffer; 2108 write_pointer = GEN8_CSB_WRITE_PTR(status_pointer); 2109 if (read_pointer > write_pointer) 2110 write_pointer += GEN8_CSB_ENTRIES; 2111 seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n", 2112 read_pointer, write_pointer); 2113 2114 for (i = 0; i < GEN8_CSB_ENTRIES; i++) { 2115 status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i)); 2116 ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i)); 2117 2118 seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n", 2119 i, status, ctx_id); 2120 } 2121 2122 spin_lock_irqsave(&ring->execlist_lock, flags); 2123 list_for_each(cursor, &ring->execlist_queue) 2124 count++; 2125 head_req = list_first_entry_or_null(&ring->execlist_queue, 2126 struct drm_i915_gem_request, execlist_link); 2127 spin_unlock_irqrestore(&ring->execlist_lock, flags); 2128 2129 seq_printf(m, "\t%d requests in queue\n", count); 2130 if (head_req) { 2131 seq_printf(m, "\tHead request id: %u\n", 2132 intel_execlists_ctx_id(head_req->ctx, ring)); 2133 seq_printf(m, "\tHead request tail: %u\n", 2134 head_req->tail); 2135 } 2136 2137 seq_putc(m, '\n'); 2138 } 2139 2140 intel_runtime_pm_put(dev_priv); 2141 mutex_unlock(&dev->struct_mutex); 2142 2143 return 0; 2144 } 2145 2146 static const char *swizzle_string(unsigned swizzle) 2147 { 2148 switch (swizzle) { 2149 case I915_BIT_6_SWIZZLE_NONE: 2150 return "none"; 2151 case I915_BIT_6_SWIZZLE_9: 2152 return "bit9"; 2153 case I915_BIT_6_SWIZZLE_9_10: 2154 return "bit9/bit10"; 2155 case I915_BIT_6_SWIZZLE_9_11: 2156 return "bit9/bit11"; 2157 case I915_BIT_6_SWIZZLE_9_10_11: 2158 return "bit9/bit10/bit11"; 2159 case I915_BIT_6_SWIZZLE_9_17: 2160 return "bit9/bit17"; 2161 case I915_BIT_6_SWIZZLE_9_10_17: 2162 return "bit9/bit10/bit17"; 2163 case I915_BIT_6_SWIZZLE_UNKNOWN: 2164 return "unknown"; 2165 } 2166 2167 return "bug"; 2168 } 2169 2170 static int i915_swizzle_info(struct seq_file *m, void *data) 2171 { 2172 struct drm_info_node *node = m->private; 2173 struct drm_device *dev = node->minor->dev; 2174 struct drm_i915_private *dev_priv = dev->dev_private; 2175 int ret; 2176 2177 ret = mutex_lock_interruptible(&dev->struct_mutex); 2178 if (ret) 2179 return ret; 2180 intel_runtime_pm_get(dev_priv); 2181 2182 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 2183 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 2184 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 2185 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 2186 2187 if (IS_GEN3(dev) || IS_GEN4(dev)) { 2188 seq_printf(m, "DDC = 0x%08x\n", 2189 I915_READ(DCC)); 2190 seq_printf(m, "DDC2 = 0x%08x\n", 2191 I915_READ(DCC2)); 2192 seq_printf(m, "C0DRB3 = 0x%04x\n", 2193 I915_READ16(C0DRB3)); 2194 seq_printf(m, "C1DRB3 = 0x%04x\n", 2195 I915_READ16(C1DRB3)); 2196 } else if (INTEL_INFO(dev)->gen >= 6) { 2197 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 2198 I915_READ(MAD_DIMM_C0)); 2199 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 2200 I915_READ(MAD_DIMM_C1)); 2201 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 2202 I915_READ(MAD_DIMM_C2)); 2203 seq_printf(m, "TILECTL = 0x%08x\n", 2204 I915_READ(TILECTL)); 2205 if 
(INTEL_INFO(dev)->gen >= 8) 2206 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 2207 I915_READ(GAMTARBMODE)); 2208 else 2209 seq_printf(m, "ARB_MODE = 0x%08x\n", 2210 I915_READ(ARB_MODE)); 2211 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 2212 I915_READ(DISP_ARB_CTL)); 2213 } 2214 2215 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2216 seq_puts(m, "L-shaped memory detected\n"); 2217 2218 intel_runtime_pm_put(dev_priv); 2219 mutex_unlock(&dev->struct_mutex); 2220 2221 return 0; 2222 } 2223 2224 static int per_file_ctx(int id, void *ptr, void *data) 2225 { 2226 struct intel_context *ctx = ptr; 2227 struct seq_file *m = data; 2228 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2229 2230 if (!ppgtt) { 2231 seq_printf(m, " no ppgtt for context %d\n", 2232 ctx->user_handle); 2233 return 0; 2234 } 2235 2236 if (i915_gem_context_is_default(ctx)) 2237 seq_puts(m, " default context:\n"); 2238 else 2239 seq_printf(m, " context %d:\n", ctx->user_handle); 2240 ppgtt->debug_dump(ppgtt, m); 2241 2242 return 0; 2243 } 2244 2245 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2246 { 2247 struct drm_i915_private *dev_priv = dev->dev_private; 2248 struct intel_engine_cs *ring; 2249 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2250 int unused, i; 2251 2252 if (!ppgtt) 2253 return; 2254 2255 for_each_ring(ring, dev_priv, unused) { 2256 seq_printf(m, "%s\n", ring->name); 2257 for (i = 0; i < 4; i++) { 2258 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i)); 2259 pdp <<= 32; 2260 pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i)); 2261 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2262 } 2263 } 2264 } 2265 2266 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2267 { 2268 struct drm_i915_private *dev_priv = dev->dev_private; 2269 struct intel_engine_cs *ring; 2270 int i; 2271 2272 if (INTEL_INFO(dev)->gen == 6) 2273 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2274 2275 for_each_ring(ring, dev_priv, i) { 2276 seq_printf(m, "%s\n", ring->name); 2277 if (INTEL_INFO(dev)->gen == 7) 2278 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 2279 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 2280 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 2281 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 2282 } 2283 if (dev_priv->mm.aliasing_ppgtt) { 2284 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2285 2286 seq_puts(m, "aliasing PPGTT:\n"); 2287 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); 2288 2289 ppgtt->debug_dump(ppgtt, m); 2290 } 2291 2292 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2293 } 2294 2295 static int i915_ppgtt_info(struct seq_file *m, void *data) 2296 { 2297 struct drm_info_node *node = m->private; 2298 struct drm_device *dev = node->minor->dev; 2299 struct drm_i915_private *dev_priv = dev->dev_private; 2300 struct drm_file *file; 2301 2302 int ret = mutex_lock_interruptible(&dev->struct_mutex); 2303 if (ret) 2304 return ret; 2305 intel_runtime_pm_get(dev_priv); 2306 2307 if (INTEL_INFO(dev)->gen >= 8) 2308 gen8_ppgtt_info(m, dev); 2309 else if (INTEL_INFO(dev)->gen >= 6) 2310 gen6_ppgtt_info(m, dev); 2311 2312 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2313 struct drm_i915_file_private *file_priv = file->driver_priv; 2314 struct task_struct *task; 2315 2316 task = get_pid_task(file->pid, PIDTYPE_PID); 2317 if (!task) { 2318 ret = -ESRCH; 2319 goto out_put; 2320 } 2321 seq_printf(m, 
"\nproc: %s\n", task->comm); 2322 put_task_struct(task); 2323 idr_for_each(&file_priv->context_idr, per_file_ctx, 2324 (void *)(unsigned long)m); 2325 } 2326 2327 out_put: 2328 intel_runtime_pm_put(dev_priv); 2329 mutex_unlock(&dev->struct_mutex); 2330 2331 return ret; 2332 } 2333 2334 static int count_irq_waiters(struct drm_i915_private *i915) 2335 { 2336 struct intel_engine_cs *ring; 2337 int count = 0; 2338 int i; 2339 2340 for_each_ring(ring, i915, i) 2341 count += ring->irq_refcount; 2342 2343 return count; 2344 } 2345 2346 static int i915_rps_boost_info(struct seq_file *m, void *data) 2347 { 2348 struct drm_info_node *node = m->private; 2349 struct drm_device *dev = node->minor->dev; 2350 struct drm_i915_private *dev_priv = dev->dev_private; 2351 struct drm_file *file; 2352 2353 seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled); 2354 seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy); 2355 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); 2356 seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2357 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 2358 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 2359 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit), 2360 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit), 2361 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 2362 spin_lock(&dev_priv->rps.client_lock); 2363 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2364 struct drm_i915_file_private *file_priv = file->driver_priv; 2365 struct task_struct *task; 2366 2367 rcu_read_lock(); 2368 task = pid_task(file->pid, PIDTYPE_PID); 2369 seq_printf(m, "%s [%d]: %d boosts%s\n", 2370 task ? task->comm : "<unknown>", 2371 task ? task->pid : -1, 2372 file_priv->rps.boosts, 2373 list_empty(&file_priv->rps.link) ? "" : ", active"); 2374 rcu_read_unlock(); 2375 } 2376 seq_printf(m, "Semaphore boosts: %d%s\n", 2377 dev_priv->rps.semaphores.boosts, 2378 list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active"); 2379 seq_printf(m, "MMIO flip boosts: %d%s\n", 2380 dev_priv->rps.mmioflips.boosts, 2381 list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active"); 2382 seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts); 2383 spin_unlock(&dev_priv->rps.client_lock); 2384 2385 return 0; 2386 } 2387 2388 static int i915_llc(struct seq_file *m, void *data) 2389 { 2390 struct drm_info_node *node = m->private; 2391 struct drm_device *dev = node->minor->dev; 2392 struct drm_i915_private *dev_priv = dev->dev_private; 2393 2394 /* Size calculation for LLC is a bit of a pain. Ignore for now. 
*/ 2395 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); 2396 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); 2397 2398 return 0; 2399 } 2400 2401 static int i915_guc_load_status_info(struct seq_file *m, void *data) 2402 { 2403 struct drm_info_node *node = m->private; 2404 struct drm_i915_private *dev_priv = node->minor->dev->dev_private; 2405 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 2406 u32 tmp, i; 2407 2408 if (!HAS_GUC_UCODE(dev_priv->dev)) 2409 return 0; 2410 2411 seq_printf(m, "GuC firmware status:\n"); 2412 seq_printf(m, "\tpath: %s\n", 2413 guc_fw->guc_fw_path); 2414 seq_printf(m, "\tfetch: %s\n", 2415 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status)); 2416 seq_printf(m, "\tload: %s\n", 2417 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 2418 seq_printf(m, "\tversion wanted: %d.%d\n", 2419 guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); 2420 seq_printf(m, "\tversion found: %d.%d\n", 2421 guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found); 2422 seq_printf(m, "\theader: offset is %d; size = %d\n", 2423 guc_fw->header_offset, guc_fw->header_size); 2424 seq_printf(m, "\tuCode: offset is %d; size = %d\n", 2425 guc_fw->ucode_offset, guc_fw->ucode_size); 2426 seq_printf(m, "\tRSA: offset is %d; size = %d\n", 2427 guc_fw->rsa_offset, guc_fw->rsa_size); 2428 2429 tmp = I915_READ(GUC_STATUS); 2430 2431 seq_printf(m, "\nGuC status 0x%08x:\n", tmp); 2432 seq_printf(m, "\tBootrom status = 0x%x\n", 2433 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); 2434 seq_printf(m, "\tuKernel status = 0x%x\n", 2435 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); 2436 seq_printf(m, "\tMIA Core status = 0x%x\n", 2437 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); 2438 seq_puts(m, "\nScratch registers:\n"); 2439 for (i = 0; i < 16; i++) 2440 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); 2441 2442 return 0; 2443 } 2444 2445 static void i915_guc_client_info(struct seq_file *m, 2446 struct drm_i915_private *dev_priv, 2447 struct i915_guc_client *client) 2448 { 2449 struct intel_engine_cs *ring; 2450 uint64_t tot = 0; 2451 uint32_t i; 2452 2453 seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n", 2454 client->priority, client->ctx_index, client->proc_desc_offset); 2455 seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n", 2456 client->doorbell_id, client->doorbell_offset, client->cookie); 2457 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", 2458 client->wq_size, client->wq_offset, client->wq_tail); 2459 2460 seq_printf(m, "\tFailed to queue: %u\n", client->q_fail); 2461 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); 2462 seq_printf(m, "\tLast submission result: %d\n", client->retcode); 2463 2464 for_each_ring(ring, dev_priv, i) { 2465 seq_printf(m, "\tSubmissions: %llu %s\n", 2466 client->submissions[i], 2467 ring->name); 2468 tot += client->submissions[i]; 2469 } 2470 seq_printf(m, "\tTotal: %llu\n", tot); 2471 } 2472 2473 static int i915_guc_info(struct seq_file *m, void *data) 2474 { 2475 struct drm_info_node *node = m->private; 2476 struct drm_device *dev = node->minor->dev; 2477 struct drm_i915_private *dev_priv = dev->dev_private; 2478 struct intel_guc guc; 2479 struct i915_guc_client client = {}; 2480 struct intel_engine_cs *ring; 2481 enum intel_ring_id i; 2482 u64 total = 0; 2483 2484 if (!HAS_GUC_SCHED(dev_priv->dev)) 2485 return 0; 2486 2487 if (mutex_lock_interruptible(&dev->struct_mutex)) 2488 return 0; 2489 2490 /* Take a local copy of the GuC data, so we can dump it at leisure */ 2491 guc = dev_priv->guc; 
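	/*
	 * The execbuf client is snapshotted the same way just below; all of the
	 * seq_file output that follows works on these stack-local copies, so
	 * struct_mutex is dropped before anything is printed.
	 */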
2492 if (guc.execbuf_client) 2493 client = *guc.execbuf_client; 2494 2495 mutex_unlock(&dev->struct_mutex); 2496 2497 seq_printf(m, "GuC total action count: %llu\n", guc.action_count); 2498 seq_printf(m, "GuC action failure count: %u\n", guc.action_fail); 2499 seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd); 2500 seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status); 2501 seq_printf(m, "GuC last action error code: %d\n", guc.action_err); 2502 2503 seq_printf(m, "\nGuC submissions:\n"); 2504 for_each_ring(ring, dev_priv, i) { 2505 seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n", 2506 ring->name, guc.submissions[i], 2507 guc.last_seqno[i], guc.last_seqno[i]); 2508 total += guc.submissions[i]; 2509 } 2510 seq_printf(m, "\t%s: %llu\n", "Total", total); 2511 2512 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client); 2513 i915_guc_client_info(m, dev_priv, &client); 2514 2515 /* Add more as required ... */ 2516 2517 return 0; 2518 } 2519 2520 static int i915_guc_log_dump(struct seq_file *m, void *data) 2521 { 2522 struct drm_info_node *node = m->private; 2523 struct drm_device *dev = node->minor->dev; 2524 struct drm_i915_private *dev_priv = dev->dev_private; 2525 struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj; 2526 u32 *log; 2527 int i = 0, pg; 2528 2529 if (!log_obj) 2530 return 0; 2531 2532 for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) { 2533 log = kmap_atomic(i915_gem_object_get_page(log_obj, pg)); 2534 2535 for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4) 2536 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n", 2537 *(log + i), *(log + i + 1), 2538 *(log + i + 2), *(log + i + 3)); 2539 2540 kunmap_atomic(log); 2541 } 2542 2543 seq_putc(m, '\n'); 2544 2545 return 0; 2546 } 2547 2548 static int i915_edp_psr_status(struct seq_file *m, void *data) 2549 { 2550 struct drm_info_node *node = m->private; 2551 struct drm_device *dev = node->minor->dev; 2552 struct drm_i915_private *dev_priv = dev->dev_private; 2553 u32 psrperf = 0; 2554 u32 stat[3]; 2555 enum pipe pipe; 2556 bool enabled = false; 2557 2558 if (!HAS_PSR(dev)) { 2559 seq_puts(m, "PSR not supported\n"); 2560 return 0; 2561 } 2562 2563 intel_runtime_pm_get(dev_priv); 2564 2565 mutex_lock(&dev_priv->psr.lock); 2566 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 2567 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 2568 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2569 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2570 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2571 dev_priv->psr.busy_frontbuffer_bits); 2572 seq_printf(m, "Re-enable work scheduled: %s\n", 2573 yesno(work_busy(&dev_priv->psr.work.work))); 2574 2575 if (HAS_DDI(dev)) 2576 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 2577 else { 2578 for_each_pipe(dev_priv, pipe) { 2579 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2580 VLV_EDP_PSR_CURR_STATE_MASK; 2581 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2582 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2583 enabled = true; 2584 } 2585 } 2586 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); 2587 2588 if (!HAS_DDI(dev)) 2589 for_each_pipe(dev_priv, pipe) { 2590 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2591 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2592 seq_printf(m, " pipe %c", pipe_name(pipe)); 2593 } 2594 seq_puts(m, "\n"); 2595 2596 /* 2597 * VLV/CHV PSR has no kind of performance counter 2598 * SKL+ Perf counter is reset to 0 every time DC
state is entered 2599 */ 2600 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2601 psrperf = I915_READ(EDP_PSR_PERF_CNT) & 2602 EDP_PSR_PERF_CNT_MASK; 2603 2604 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2605 } 2606 mutex_unlock(&dev_priv->psr.lock); 2607 2608 intel_runtime_pm_put(dev_priv); 2609 return 0; 2610 } 2611 2612 static int i915_sink_crc(struct seq_file *m, void *data) 2613 { 2614 struct drm_info_node *node = m->private; 2615 struct drm_device *dev = node->minor->dev; 2616 struct intel_encoder *encoder; 2617 struct intel_connector *connector; 2618 struct intel_dp *intel_dp = NULL; 2619 int ret; 2620 u8 crc[6]; 2621 2622 drm_modeset_lock_all(dev); 2623 for_each_intel_connector(dev, connector) { 2624 2625 if (connector->base.dpms != DRM_MODE_DPMS_ON) 2626 continue; 2627 2628 if (!connector->base.encoder) 2629 continue; 2630 2631 encoder = to_intel_encoder(connector->base.encoder); 2632 if (encoder->type != INTEL_OUTPUT_EDP) 2633 continue; 2634 2635 intel_dp = enc_to_intel_dp(&encoder->base); 2636 2637 ret = intel_dp_sink_crc(intel_dp, crc); 2638 if (ret) 2639 goto out; 2640 2641 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2642 crc[0], crc[1], crc[2], 2643 crc[3], crc[4], crc[5]); 2644 goto out; 2645 } 2646 ret = -ENODEV; 2647 out: 2648 drm_modeset_unlock_all(dev); 2649 return ret; 2650 } 2651 2652 static int i915_energy_uJ(struct seq_file *m, void *data) 2653 { 2654 struct drm_info_node *node = m->private; 2655 struct drm_device *dev = node->minor->dev; 2656 struct drm_i915_private *dev_priv = dev->dev_private; 2657 u64 power; 2658 u32 units; 2659 2660 if (INTEL_INFO(dev)->gen < 6) 2661 return -ENODEV; 2662 2663 intel_runtime_pm_get(dev_priv); 2664 2665 rdmsrl(MSR_RAPL_POWER_UNIT, power); 2666 power = (power & 0x1f00) >> 8; 2667 units = 1000000 / (1 << power); /* convert to uJ */ 2668 power = I915_READ(MCH_SECP_NRG_STTS); 2669 power *= units; 2670 2671 intel_runtime_pm_put(dev_priv); 2672 2673 seq_printf(m, "%llu", (long long unsigned)power); 2674 2675 return 0; 2676 } 2677 2678 static int i915_runtime_pm_status(struct seq_file *m, void *unused) 2679 { 2680 struct drm_info_node *node = m->private; 2681 struct drm_device *dev = node->minor->dev; 2682 struct drm_i915_private *dev_priv = dev->dev_private; 2683 2684 if (!HAS_RUNTIME_PM(dev)) { 2685 seq_puts(m, "not supported\n"); 2686 return 0; 2687 } 2688 2689 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); 2690 seq_printf(m, "IRQs disabled: %s\n", 2691 yesno(!intel_irqs_enabled(dev_priv))); 2692 #ifdef CONFIG_PM 2693 seq_printf(m, "Usage count: %d\n", 2694 atomic_read(&dev->dev->power.usage_count)); 2695 #else 2696 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); 2697 #endif 2698 2699 return 0; 2700 } 2701 2702 static int i915_power_domain_info(struct seq_file *m, void *unused) 2703 { 2704 struct drm_info_node *node = m->private; 2705 struct drm_device *dev = node->minor->dev; 2706 struct drm_i915_private *dev_priv = dev->dev_private; 2707 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2708 int i; 2709 2710 mutex_lock(&power_domains->lock); 2711 2712 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2713 for (i = 0; i < power_domains->power_well_count; i++) { 2714 struct i915_power_well *power_well; 2715 enum intel_display_power_domain power_domain; 2716 2717 power_well = &power_domains->power_wells[i]; 2718 seq_printf(m, "%-25s %d\n", power_well->name, 2719 power_well->count); 2720 2721 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM; 2722 power_domain++) { 
2723 if (!(BIT(power_domain) & power_well->domains)) 2724 continue; 2725 2726 seq_printf(m, " %-23s %d\n", 2727 intel_display_power_domain_str(power_domain), 2728 power_domains->domain_use_count[power_domain]); 2729 } 2730 } 2731 2732 mutex_unlock(&power_domains->lock); 2733 2734 return 0; 2735 } 2736 2737 static int i915_dmc_info(struct seq_file *m, void *unused) 2738 { 2739 struct drm_info_node *node = m->private; 2740 struct drm_device *dev = node->minor->dev; 2741 struct drm_i915_private *dev_priv = dev->dev_private; 2742 struct intel_csr *csr; 2743 2744 if (!HAS_CSR(dev)) { 2745 seq_puts(m, "not supported\n"); 2746 return 0; 2747 } 2748 2749 csr = &dev_priv->csr; 2750 2751 intel_runtime_pm_get(dev_priv); 2752 2753 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); 2754 seq_printf(m, "path: %s\n", csr->fw_path); 2755 2756 if (!csr->dmc_payload) 2757 goto out; 2758 2759 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 2760 CSR_VERSION_MINOR(csr->version)); 2761 2762 if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) { 2763 seq_printf(m, "DC3 -> DC5 count: %d\n", 2764 I915_READ(SKL_CSR_DC3_DC5_COUNT)); 2765 seq_printf(m, "DC5 -> DC6 count: %d\n", 2766 I915_READ(SKL_CSR_DC5_DC6_COUNT)); 2767 } else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) { 2768 seq_printf(m, "DC3 -> DC5 count: %d\n", 2769 I915_READ(BXT_CSR_DC3_DC5_COUNT)); 2770 } 2771 2772 out: 2773 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); 2774 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); 2775 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); 2776 2777 intel_runtime_pm_put(dev_priv); 2778 2779 return 0; 2780 } 2781 2782 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2783 struct drm_display_mode *mode) 2784 { 2785 int i; 2786 2787 for (i = 0; i < tabs; i++) 2788 seq_putc(m, '\t'); 2789 2790 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2791 mode->base.id, mode->name, 2792 mode->vrefresh, mode->clock, 2793 mode->hdisplay, mode->hsync_start, 2794 mode->hsync_end, mode->htotal, 2795 mode->vdisplay, mode->vsync_start, 2796 mode->vsync_end, mode->vtotal, 2797 mode->type, mode->flags); 2798 } 2799 2800 static void intel_encoder_info(struct seq_file *m, 2801 struct intel_crtc *intel_crtc, 2802 struct intel_encoder *intel_encoder) 2803 { 2804 struct drm_info_node *node = m->private; 2805 struct drm_device *dev = node->minor->dev; 2806 struct drm_crtc *crtc = &intel_crtc->base; 2807 struct intel_connector *intel_connector; 2808 struct drm_encoder *encoder; 2809 2810 encoder = &intel_encoder->base; 2811 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2812 encoder->base.id, encoder->name); 2813 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2814 struct drm_connector *connector = &intel_connector->base; 2815 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2816 connector->base.id, 2817 connector->name, 2818 drm_get_connector_status_name(connector->status)); 2819 if (connector->status == connector_status_connected) { 2820 struct drm_display_mode *mode = &crtc->mode; 2821 seq_printf(m, ", mode:\n"); 2822 intel_seq_print_mode(m, 2, mode); 2823 } else { 2824 seq_putc(m, '\n'); 2825 } 2826 } 2827 } 2828 2829 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2830 { 2831 struct drm_info_node *node = m->private; 2832 struct drm_device *dev = node->minor->dev; 2833 struct drm_crtc *crtc = 
&intel_crtc->base; 2834 struct intel_encoder *intel_encoder; 2835 struct drm_plane_state *plane_state = crtc->primary->state; 2836 struct drm_framebuffer *fb = plane_state->fb; 2837 2838 if (fb) 2839 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2840 fb->base.id, plane_state->src_x >> 16, 2841 plane_state->src_y >> 16, fb->width, fb->height); 2842 else 2843 seq_puts(m, "\tprimary plane disabled\n"); 2844 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2845 intel_encoder_info(m, intel_crtc, intel_encoder); 2846 } 2847 2848 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2849 { 2850 struct drm_display_mode *mode = panel->fixed_mode; 2851 2852 seq_printf(m, "\tfixed mode:\n"); 2853 intel_seq_print_mode(m, 2, mode); 2854 } 2855 2856 static void intel_dp_info(struct seq_file *m, 2857 struct intel_connector *intel_connector) 2858 { 2859 struct intel_encoder *intel_encoder = intel_connector->encoder; 2860 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2861 2862 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2863 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 2864 if (intel_encoder->type == INTEL_OUTPUT_EDP) 2865 intel_panel_info(m, &intel_connector->panel); 2866 } 2867 2868 static void intel_dp_mst_info(struct seq_file *m, 2869 struct intel_connector *intel_connector) 2870 { 2871 struct intel_encoder *intel_encoder = intel_connector->encoder; 2872 struct intel_dp_mst_encoder *intel_mst = 2873 enc_to_mst(&intel_encoder->base); 2874 struct intel_digital_port *intel_dig_port = intel_mst->primary; 2875 struct intel_dp *intel_dp = &intel_dig_port->dp; 2876 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, 2877 intel_connector->port); 2878 2879 seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); 2880 } 2881 2882 static void intel_hdmi_info(struct seq_file *m, 2883 struct intel_connector *intel_connector) 2884 { 2885 struct intel_encoder *intel_encoder = intel_connector->encoder; 2886 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2887 2888 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); 2889 } 2890 2891 static void intel_lvds_info(struct seq_file *m, 2892 struct intel_connector *intel_connector) 2893 { 2894 intel_panel_info(m, &intel_connector->panel); 2895 } 2896 2897 static void intel_connector_info(struct seq_file *m, 2898 struct drm_connector *connector) 2899 { 2900 struct intel_connector *intel_connector = to_intel_connector(connector); 2901 struct intel_encoder *intel_encoder = intel_connector->encoder; 2902 struct drm_display_mode *mode; 2903 2904 seq_printf(m, "connector %d: type %s, status: %s\n", 2905 connector->base.id, connector->name, 2906 drm_get_connector_status_name(connector->status)); 2907 if (connector->status == connector_status_connected) { 2908 seq_printf(m, "\tname: %s\n", connector->display_info.name); 2909 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2910 connector->display_info.width_mm, 2911 connector->display_info.height_mm); 2912 seq_printf(m, "\tsubpixel order: %s\n", 2913 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2914 seq_printf(m, "\tCEA rev: %d\n", 2915 connector->display_info.cea_rev); 2916 } 2917 if (intel_encoder) { 2918 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2919 intel_encoder->type == INTEL_OUTPUT_EDP) 2920 intel_dp_info(m, intel_connector); 2921 else if (intel_encoder->type == INTEL_OUTPUT_HDMI) 2922 intel_hdmi_info(m, intel_connector); 2923 else if 
(intel_encoder->type == INTEL_OUTPUT_LVDS) 2924 intel_lvds_info(m, intel_connector); 2925 else if (intel_encoder->type == INTEL_OUTPUT_DP_MST) 2926 intel_dp_mst_info(m, intel_connector); 2927 } 2928 2929 seq_printf(m, "\tmodes:\n"); 2930 list_for_each_entry(mode, &connector->modes, head) 2931 intel_seq_print_mode(m, 2, mode); 2932 } 2933 2934 static bool cursor_active(struct drm_device *dev, int pipe) 2935 { 2936 struct drm_i915_private *dev_priv = dev->dev_private; 2937 u32 state; 2938 2939 if (IS_845G(dev) || IS_I865G(dev)) 2940 state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 2941 else 2942 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 2943 2944 return state; 2945 } 2946 2947 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) 2948 { 2949 struct drm_i915_private *dev_priv = dev->dev_private; 2950 u32 pos; 2951 2952 pos = I915_READ(CURPOS(pipe)); 2953 2954 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; 2955 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) 2956 *x = -*x; 2957 2958 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; 2959 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) 2960 *y = -*y; 2961 2962 return cursor_active(dev, pipe); 2963 } 2964 2965 static const char *plane_type(enum drm_plane_type type) 2966 { 2967 switch (type) { 2968 case DRM_PLANE_TYPE_OVERLAY: 2969 return "OVL"; 2970 case DRM_PLANE_TYPE_PRIMARY: 2971 return "PRI"; 2972 case DRM_PLANE_TYPE_CURSOR: 2973 return "CUR"; 2974 /* 2975 * Deliberately omitting default: to generate compiler warnings 2976 * when a new drm_plane_type gets added. 2977 */ 2978 } 2979 2980 return "unknown"; 2981 } 2982 2983 static const char *plane_rotation(unsigned int rotation) 2984 { 2985 static char buf[48]; 2986 /* 2987 * According to doc only one DRM_ROTATE_ is allowed but this 2988 * will print them all to visualize if the values are misused 2989 */ 2990 snprintf(buf, sizeof(buf), 2991 "%s%s%s%s%s%s(0x%08x)", 2992 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "", 2993 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "", 2994 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "", 2995 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "", 2996 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "", 2997 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "", 2998 rotation); 2999 3000 return buf; 3001 } 3002 3003 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3004 { 3005 struct drm_info_node *node = m->private; 3006 struct drm_device *dev = node->minor->dev; 3007 struct intel_plane *intel_plane; 3008 3009 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3010 struct drm_plane_state *state; 3011 struct drm_plane *plane = &intel_plane->base; 3012 3013 if (!plane->state) { 3014 seq_puts(m, "plane->state is NULL!\n"); 3015 continue; 3016 } 3017 3018 state = plane->state; 3019 3020 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", 3021 plane->base.id, 3022 plane_type(intel_plane->base.type), 3023 state->crtc_x, state->crtc_y, 3024 state->crtc_w, state->crtc_h, 3025 (state->src_x >> 16), 3026 ((state->src_x & 0xffff) * 15625) >> 10, 3027 (state->src_y >> 16), 3028 ((state->src_y & 0xffff) * 15625) >> 10, 3029 (state->src_w >> 16), 3030 ((state->src_w & 0xffff) * 15625) >> 10, 3031 (state->src_h >> 16), 3032 ((state->src_h & 0xffff) * 15625) >> 10, 3033 state->fb ? 
drm_get_format_name(state->fb->pixel_format) : "N/A", 3034 plane_rotation(state->rotation)); 3035 } 3036 } 3037 3038 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3039 { 3040 struct intel_crtc_state *pipe_config; 3041 int num_scalers = intel_crtc->num_scalers; 3042 int i; 3043 3044 pipe_config = to_intel_crtc_state(intel_crtc->base.state); 3045 3046 /* Not all platforms have a scaler */ 3047 if (num_scalers) { 3048 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 3049 num_scalers, 3050 pipe_config->scaler_state.scaler_users, 3051 pipe_config->scaler_state.scaler_id); 3052 3053 for (i = 0; i < SKL_NUM_SCALERS; i++) { 3054 struct intel_scaler *sc = 3055 &pipe_config->scaler_state.scalers[i]; 3056 3057 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 3058 i, yesno(sc->in_use), sc->mode); 3059 } 3060 seq_puts(m, "\n"); 3061 } else { 3062 seq_puts(m, "\tNo scalers available on this platform\n"); 3063 } 3064 } 3065 3066 static int i915_display_info(struct seq_file *m, void *unused) 3067 { 3068 struct drm_info_node *node = m->private; 3069 struct drm_device *dev = node->minor->dev; 3070 struct drm_i915_private *dev_priv = dev->dev_private; 3071 struct intel_crtc *crtc; 3072 struct drm_connector *connector; 3073 3074 intel_runtime_pm_get(dev_priv); 3075 drm_modeset_lock_all(dev); 3076 seq_printf(m, "CRTC info\n"); 3077 seq_printf(m, "---------\n"); 3078 for_each_intel_crtc(dev, crtc) { 3079 bool active; 3080 struct intel_crtc_state *pipe_config; 3081 int x, y; 3082 3083 pipe_config = to_intel_crtc_state(crtc->base.state); 3084 3085 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n", 3086 crtc->base.base.id, pipe_name(crtc->pipe), 3087 yesno(pipe_config->base.active), 3088 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 3089 yesno(pipe_config->dither), pipe_config->pipe_bpp); 3090 3091 if (pipe_config->base.active) { 3092 intel_crtc_info(m, crtc); 3093 3094 active = cursor_position(dev, crtc->pipe, &x, &y); 3095 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? 
%s\n", 3096 yesno(crtc->cursor_base), 3097 x, y, crtc->base.cursor->state->crtc_w, 3098 crtc->base.cursor->state->crtc_h, 3099 crtc->cursor_addr, yesno(active)); 3100 intel_scaler_info(m, crtc); 3101 intel_plane_info(m, crtc); 3102 } 3103 3104 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 3105 yesno(!crtc->cpu_fifo_underrun_disabled), 3106 yesno(!crtc->pch_fifo_underrun_disabled)); 3107 } 3108 3109 seq_printf(m, "\n"); 3110 seq_printf(m, "Connector info\n"); 3111 seq_printf(m, "--------------\n"); 3112 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 3113 intel_connector_info(m, connector); 3114 } 3115 drm_modeset_unlock_all(dev); 3116 intel_runtime_pm_put(dev_priv); 3117 3118 return 0; 3119 } 3120 3121 static int i915_semaphore_status(struct seq_file *m, void *unused) 3122 { 3123 struct drm_info_node *node = (struct drm_info_node *) m->private; 3124 struct drm_device *dev = node->minor->dev; 3125 struct drm_i915_private *dev_priv = dev->dev_private; 3126 struct intel_engine_cs *ring; 3127 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 3128 int i, j, ret; 3129 3130 if (!i915_semaphore_is_enabled(dev)) { 3131 seq_puts(m, "Semaphores are disabled\n"); 3132 return 0; 3133 } 3134 3135 ret = mutex_lock_interruptible(&dev->struct_mutex); 3136 if (ret) 3137 return ret; 3138 intel_runtime_pm_get(dev_priv); 3139 3140 if (IS_BROADWELL(dev)) { 3141 struct page *page; 3142 uint64_t *seqno; 3143 3144 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); 3145 3146 seqno = (uint64_t *)kmap_atomic(page); 3147 for_each_ring(ring, dev_priv, i) { 3148 uint64_t offset; 3149 3150 seq_printf(m, "%s\n", ring->name); 3151 3152 seq_puts(m, " Last signal:"); 3153 for (j = 0; j < num_rings; j++) { 3154 offset = i * I915_NUM_RINGS + j; 3155 seq_printf(m, "0x%08llx (0x%02llx) ", 3156 seqno[offset], offset * 8); 3157 } 3158 seq_putc(m, '\n'); 3159 3160 seq_puts(m, " Last wait: "); 3161 for (j = 0; j < num_rings; j++) { 3162 offset = i + (j * I915_NUM_RINGS); 3163 seq_printf(m, "0x%08llx (0x%02llx) ", 3164 seqno[offset], offset * 8); 3165 } 3166 seq_putc(m, '\n'); 3167 3168 } 3169 kunmap_atomic(seqno); 3170 } else { 3171 seq_puts(m, " Last signal:"); 3172 for_each_ring(ring, dev_priv, i) 3173 for (j = 0; j < num_rings; j++) 3174 seq_printf(m, "0x%08x\n", 3175 I915_READ(ring->semaphore.mbox.signal[j])); 3176 seq_putc(m, '\n'); 3177 } 3178 3179 seq_puts(m, "\nSync seqno:\n"); 3180 for_each_ring(ring, dev_priv, i) { 3181 for (j = 0; j < num_rings; j++) { 3182 seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]); 3183 } 3184 seq_putc(m, '\n'); 3185 } 3186 seq_putc(m, '\n'); 3187 3188 intel_runtime_pm_put(dev_priv); 3189 mutex_unlock(&dev->struct_mutex); 3190 return 0; 3191 } 3192 3193 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 3194 { 3195 struct drm_info_node *node = (struct drm_info_node *) m->private; 3196 struct drm_device *dev = node->minor->dev; 3197 struct drm_i915_private *dev_priv = dev->dev_private; 3198 int i; 3199 3200 drm_modeset_lock_all(dev); 3201 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3202 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 3203 3204 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 3205 seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n", 3206 pll->config.crtc_mask, pll->active, yesno(pll->on)); 3207 seq_printf(m, " tracked hardware state:\n"); 3208 seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll); 3209 seq_printf(m, " dpll_md: 0x%08x\n", 3210 
pll->config.hw_state.dpll_md); 3211 seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0); 3212 seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1); 3213 seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll); 3214 } 3215 drm_modeset_unlock_all(dev); 3216 3217 return 0; 3218 } 3219 3220 static int i915_wa_registers(struct seq_file *m, void *unused) 3221 { 3222 int i; 3223 int ret; 3224 struct drm_info_node *node = (struct drm_info_node *) m->private; 3225 struct drm_device *dev = node->minor->dev; 3226 struct drm_i915_private *dev_priv = dev->dev_private; 3227 3228 ret = mutex_lock_interruptible(&dev->struct_mutex); 3229 if (ret) 3230 return ret; 3231 3232 intel_runtime_pm_get(dev_priv); 3233 3234 seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); 3235 for (i = 0; i < dev_priv->workarounds.count; ++i) { 3236 i915_reg_t addr; 3237 u32 mask, value, read; 3238 bool ok; 3239 3240 addr = dev_priv->workarounds.reg[i].addr; 3241 mask = dev_priv->workarounds.reg[i].mask; 3242 value = dev_priv->workarounds.reg[i].value; 3243 read = I915_READ(addr); 3244 ok = (value & mask) == (read & mask); 3245 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 3246 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL"); 3247 } 3248 3249 intel_runtime_pm_put(dev_priv); 3250 mutex_unlock(&dev->struct_mutex); 3251 3252 return 0; 3253 } 3254 3255 static int i915_ddb_info(struct seq_file *m, void *unused) 3256 { 3257 struct drm_info_node *node = m->private; 3258 struct drm_device *dev = node->minor->dev; 3259 struct drm_i915_private *dev_priv = dev->dev_private; 3260 struct skl_ddb_allocation *ddb; 3261 struct skl_ddb_entry *entry; 3262 enum pipe pipe; 3263 int plane; 3264 3265 if (INTEL_INFO(dev)->gen < 9) 3266 return 0; 3267 3268 drm_modeset_lock_all(dev); 3269 3270 ddb = &dev_priv->wm.skl_hw.ddb; 3271 3272 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 3273 3274 for_each_pipe(dev_priv, pipe) { 3275 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 3276 3277 for_each_plane(dev_priv, pipe, plane) { 3278 entry = &ddb->plane[pipe][plane]; 3279 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, 3280 entry->start, entry->end, 3281 skl_ddb_entry_size(entry)); 3282 } 3283 3284 entry = &ddb->plane[pipe][PLANE_CURSOR]; 3285 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3286 entry->end, skl_ddb_entry_size(entry)); 3287 } 3288 3289 drm_modeset_unlock_all(dev); 3290 3291 return 0; 3292 } 3293 3294 static void drrs_status_per_crtc(struct seq_file *m, 3295 struct drm_device *dev, struct intel_crtc *intel_crtc) 3296 { 3297 struct intel_encoder *intel_encoder; 3298 struct drm_i915_private *dev_priv = dev->dev_private; 3299 struct i915_drrs *drrs = &dev_priv->drrs; 3300 int vrefresh = 0; 3301 3302 for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) { 3303 /* Encoder connected on this CRTC */ 3304 switch (intel_encoder->type) { 3305 case INTEL_OUTPUT_EDP: 3306 seq_puts(m, "eDP:\n"); 3307 break; 3308 case INTEL_OUTPUT_DSI: 3309 seq_puts(m, "DSI:\n"); 3310 break; 3311 case INTEL_OUTPUT_HDMI: 3312 seq_puts(m, "HDMI:\n"); 3313 break; 3314 case INTEL_OUTPUT_DISPLAYPORT: 3315 seq_puts(m, "DP:\n"); 3316 break; 3317 default: 3318 seq_printf(m, "Other encoder (id=%d).\n", 3319 intel_encoder->type); 3320 return; 3321 } 3322 } 3323 3324 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) 3325 seq_puts(m, "\tVBT: DRRS_type: Static"); 3326 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) 3327 seq_puts(m, "\tVBT: DRRS_type: 
Seamless"); 3328 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED) 3329 seq_puts(m, "\tVBT: DRRS_type: None"); 3330 else 3331 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); 3332 3333 seq_puts(m, "\n\n"); 3334 3335 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { 3336 struct intel_panel *panel; 3337 3338 mutex_lock(&drrs->mutex); 3339 /* DRRS Supported */ 3340 seq_puts(m, "\tDRRS Supported: Yes\n"); 3341 3342 /* disable_drrs() will make drrs->dp NULL */ 3343 if (!drrs->dp) { 3344 seq_puts(m, "Idleness DRRS: Disabled"); 3345 mutex_unlock(&drrs->mutex); 3346 return; 3347 } 3348 3349 panel = &drrs->dp->attached_connector->panel; 3350 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", 3351 drrs->busy_frontbuffer_bits); 3352 3353 seq_puts(m, "\n\t\t"); 3354 if (drrs->refresh_rate_type == DRRS_HIGH_RR) { 3355 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); 3356 vrefresh = panel->fixed_mode->vrefresh; 3357 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { 3358 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); 3359 vrefresh = panel->downclock_mode->vrefresh; 3360 } else { 3361 seq_printf(m, "DRRS_State: Unknown(%d)\n", 3362 drrs->refresh_rate_type); 3363 mutex_unlock(&drrs->mutex); 3364 return; 3365 } 3366 seq_printf(m, "\t\tVrefresh: %d", vrefresh); 3367 3368 seq_puts(m, "\n\t\t"); 3369 mutex_unlock(&drrs->mutex); 3370 } else { 3371 /* DRRS not supported. Print the VBT parameter*/ 3372 seq_puts(m, "\tDRRS Supported : No"); 3373 } 3374 seq_puts(m, "\n"); 3375 } 3376 3377 static int i915_drrs_status(struct seq_file *m, void *unused) 3378 { 3379 struct drm_info_node *node = m->private; 3380 struct drm_device *dev = node->minor->dev; 3381 struct intel_crtc *intel_crtc; 3382 int active_crtc_cnt = 0; 3383 3384 for_each_intel_crtc(dev, intel_crtc) { 3385 drm_modeset_lock(&intel_crtc->base.mutex, NULL); 3386 3387 if (intel_crtc->base.state->active) { 3388 active_crtc_cnt++; 3389 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); 3390 3391 drrs_status_per_crtc(m, dev, intel_crtc); 3392 } 3393 3394 drm_modeset_unlock(&intel_crtc->base.mutex); 3395 } 3396 3397 if (!active_crtc_cnt) 3398 seq_puts(m, "No active crtc found\n"); 3399 3400 return 0; 3401 } 3402 3403 struct pipe_crc_info { 3404 const char *name; 3405 struct drm_device *dev; 3406 enum pipe pipe; 3407 }; 3408 3409 static int i915_dp_mst_info(struct seq_file *m, void *unused) 3410 { 3411 struct drm_info_node *node = (struct drm_info_node *) m->private; 3412 struct drm_device *dev = node->minor->dev; 3413 struct drm_encoder *encoder; 3414 struct intel_encoder *intel_encoder; 3415 struct intel_digital_port *intel_dig_port; 3416 drm_modeset_lock_all(dev); 3417 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 3418 intel_encoder = to_intel_encoder(encoder); 3419 if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT) 3420 continue; 3421 intel_dig_port = enc_to_dig_port(encoder); 3422 if (!intel_dig_port->dp.can_mst) 3423 continue; 3424 3425 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 3426 } 3427 drm_modeset_unlock_all(dev); 3428 return 0; 3429 } 3430 3431 static int i915_pipe_crc_open(struct inode *inode, struct file *filep) 3432 { 3433 struct pipe_crc_info *info = inode->i_private; 3434 struct drm_i915_private *dev_priv = info->dev->dev_private; 3435 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3436 3437 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) 3438 return -ENODEV; 3439 3440 spin_lock_irq(&pipe_crc->lock); 3441 3442 if (pipe_crc->opened) { 3443 
spin_unlock_irq(&pipe_crc->lock); 3444 return -EBUSY; /* already open */ 3445 } 3446 3447 pipe_crc->opened = true; 3448 filep->private_data = inode->i_private; 3449 3450 spin_unlock_irq(&pipe_crc->lock); 3451 3452 return 0; 3453 } 3454 3455 static int i915_pipe_crc_release(struct inode *inode, struct file *filep) 3456 { 3457 struct pipe_crc_info *info = inode->i_private; 3458 struct drm_i915_private *dev_priv = info->dev->dev_private; 3459 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3460 3461 spin_lock_irq(&pipe_crc->lock); 3462 pipe_crc->opened = false; 3463 spin_unlock_irq(&pipe_crc->lock); 3464 3465 return 0; 3466 } 3467 3468 /* (6 fields, 8 chars each, space separated (5) + '\n') */ 3469 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) 3470 /* account for the terminating '\0' */ 3471 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) 3472 3473 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) 3474 { 3475 assert_spin_locked(&pipe_crc->lock); 3476 return CIRC_CNT(pipe_crc->head, pipe_crc->tail, 3477 INTEL_PIPE_CRC_ENTRIES_NR); 3478 } 3479 3480 static ssize_t 3481 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, 3482 loff_t *pos) 3483 { 3484 struct pipe_crc_info *info = filep->private_data; 3485 struct drm_device *dev = info->dev; 3486 struct drm_i915_private *dev_priv = dev->dev_private; 3487 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3488 char buf[PIPE_CRC_BUFFER_LEN]; 3489 int n_entries; 3490 ssize_t bytes_read; 3491 3492 /* 3493 * Don't allow user space to provide buffers not big enough to hold 3494 * a line of data. 3495 */ 3496 if (count < PIPE_CRC_LINE_LEN) 3497 return -EINVAL; 3498 3499 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) 3500 return 0; 3501 3502 /* nothing to read */ 3503 spin_lock_irq(&pipe_crc->lock); 3504 while (pipe_crc_data_count(pipe_crc) == 0) { 3505 int ret; 3506 3507 if (filep->f_flags & O_NONBLOCK) { 3508 spin_unlock_irq(&pipe_crc->lock); 3509 return -EAGAIN; 3510 } 3511 3512 ret = wait_event_interruptible_lock_irq(pipe_crc->wq, 3513 pipe_crc_data_count(pipe_crc), pipe_crc->lock); 3514 if (ret) { 3515 spin_unlock_irq(&pipe_crc->lock); 3516 return ret; 3517 } 3518 } 3519 3520 /* We now have one or more entries to read */ 3521 n_entries = count / PIPE_CRC_LINE_LEN; 3522 3523 bytes_read = 0; 3524 while (n_entries > 0) { 3525 struct intel_pipe_crc_entry *entry = 3526 &pipe_crc->entries[pipe_crc->tail]; 3527 int ret; 3528 3529 if (CIRC_CNT(pipe_crc->head, pipe_crc->tail, 3530 INTEL_PIPE_CRC_ENTRIES_NR) < 1) 3531 break; 3532 3533 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); 3534 pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 3535 3536 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, 3537 "%8u %8x %8x %8x %8x %8x\n", 3538 entry->frame, entry->crc[0], 3539 entry->crc[1], entry->crc[2], 3540 entry->crc[3], entry->crc[4]); 3541 3542 spin_unlock_irq(&pipe_crc->lock); 3543 3544 ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN); 3545 if (ret == PIPE_CRC_LINE_LEN) 3546 return -EFAULT; 3547 3548 user_buf += PIPE_CRC_LINE_LEN; 3549 n_entries--; 3550 3551 spin_lock_irq(&pipe_crc->lock); 3552 } 3553 3554 spin_unlock_irq(&pipe_crc->lock); 3555 3556 return bytes_read; 3557 } 3558 3559 static const struct file_operations i915_pipe_crc_fops = { 3560 .owner = THIS_MODULE, 3561 .open = i915_pipe_crc_open, 3562 .read = i915_pipe_crc_read, 3563 .release = i915_pipe_crc_release, 3564 }; 3565 3566 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { 3567 { 3568
.name = "i915_pipe_A_crc", 3569 .pipe = PIPE_A, 3570 }, 3571 { 3572 .name = "i915_pipe_B_crc", 3573 .pipe = PIPE_B, 3574 }, 3575 { 3576 .name = "i915_pipe_C_crc", 3577 .pipe = PIPE_C, 3578 }, 3579 }; 3580 3581 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, 3582 enum pipe pipe) 3583 { 3584 struct drm_device *dev = minor->dev; 3585 struct dentry *ent; 3586 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; 3587 3588 info->dev = dev; 3589 ent = debugfs_create_file(info->name, S_IRUGO, root, info, 3590 &i915_pipe_crc_fops); 3591 if (!ent) 3592 return -ENOMEM; 3593 3594 return drm_add_fake_info_node(minor, ent, info); 3595 } 3596 3597 static const char * const pipe_crc_sources[] = { 3598 "none", 3599 "plane1", 3600 "plane2", 3601 "pf", 3602 "pipe", 3603 "TV", 3604 "DP-B", 3605 "DP-C", 3606 "DP-D", 3607 "auto", 3608 }; 3609 3610 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) 3611 { 3612 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); 3613 return pipe_crc_sources[source]; 3614 } 3615 3616 static int display_crc_ctl_show(struct seq_file *m, void *data) 3617 { 3618 struct drm_device *dev = m->private; 3619 struct drm_i915_private *dev_priv = dev->dev_private; 3620 int i; 3621 3622 for (i = 0; i < I915_MAX_PIPES; i++) 3623 seq_printf(m, "%c %s\n", pipe_name(i), 3624 pipe_crc_source_name(dev_priv->pipe_crc[i].source)); 3625 3626 return 0; 3627 } 3628 3629 static int display_crc_ctl_open(struct inode *inode, struct file *file) 3630 { 3631 struct drm_device *dev = inode->i_private; 3632 3633 return single_open(file, display_crc_ctl_show, dev); 3634 } 3635 3636 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 3637 uint32_t *val) 3638 { 3639 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3640 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3641 3642 switch (*source) { 3643 case INTEL_PIPE_CRC_SOURCE_PIPE: 3644 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; 3645 break; 3646 case INTEL_PIPE_CRC_SOURCE_NONE: 3647 *val = 0; 3648 break; 3649 default: 3650 return -EINVAL; 3651 } 3652 3653 return 0; 3654 } 3655 3656 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, 3657 enum intel_pipe_crc_source *source) 3658 { 3659 struct intel_encoder *encoder; 3660 struct intel_crtc *crtc; 3661 struct intel_digital_port *dig_port; 3662 int ret = 0; 3663 3664 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3665 3666 drm_modeset_lock_all(dev); 3667 for_each_intel_encoder(dev, encoder) { 3668 if (!encoder->base.crtc) 3669 continue; 3670 3671 crtc = to_intel_crtc(encoder->base.crtc); 3672 3673 if (crtc->pipe != pipe) 3674 continue; 3675 3676 switch (encoder->type) { 3677 case INTEL_OUTPUT_TVOUT: 3678 *source = INTEL_PIPE_CRC_SOURCE_TV; 3679 break; 3680 case INTEL_OUTPUT_DISPLAYPORT: 3681 case INTEL_OUTPUT_EDP: 3682 dig_port = enc_to_dig_port(&encoder->base); 3683 switch (dig_port->port) { 3684 case PORT_B: 3685 *source = INTEL_PIPE_CRC_SOURCE_DP_B; 3686 break; 3687 case PORT_C: 3688 *source = INTEL_PIPE_CRC_SOURCE_DP_C; 3689 break; 3690 case PORT_D: 3691 *source = INTEL_PIPE_CRC_SOURCE_DP_D; 3692 break; 3693 default: 3694 WARN(1, "nonexisting DP port %c\n", 3695 port_name(dig_port->port)); 3696 break; 3697 } 3698 break; 3699 default: 3700 break; 3701 } 3702 } 3703 drm_modeset_unlock_all(dev); 3704 3705 return ret; 3706 } 3707 3708 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, 3709 enum pipe pipe, 3710 enum intel_pipe_crc_source *source, 3711 uint32_t *val) 3712 { 3713 struct drm_i915_private 
*dev_priv = dev->dev_private; 3714 bool need_stable_symbols = false; 3715 3716 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3717 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 3718 if (ret) 3719 return ret; 3720 } 3721 3722 switch (*source) { 3723 case INTEL_PIPE_CRC_SOURCE_PIPE: 3724 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; 3725 break; 3726 case INTEL_PIPE_CRC_SOURCE_DP_B: 3727 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; 3728 need_stable_symbols = true; 3729 break; 3730 case INTEL_PIPE_CRC_SOURCE_DP_C: 3731 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; 3732 need_stable_symbols = true; 3733 break; 3734 case INTEL_PIPE_CRC_SOURCE_DP_D: 3735 if (!IS_CHERRYVIEW(dev)) 3736 return -EINVAL; 3737 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV; 3738 need_stable_symbols = true; 3739 break; 3740 case INTEL_PIPE_CRC_SOURCE_NONE: 3741 *val = 0; 3742 break; 3743 default: 3744 return -EINVAL; 3745 } 3746 3747 /* 3748 * When the pipe CRC tap point is after the transcoders we need 3749 * to tweak symbol-level features to produce a deterministic series of 3750 * symbols for a given frame. We need to reset those features only once 3751 * a frame (instead of every nth symbol): 3752 * - DC-balance: used to ensure a better clock recovery from the data 3753 * link (SDVO) 3754 * - DisplayPort scrambling: used for EMI reduction 3755 */ 3756 if (need_stable_symbols) { 3757 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3758 3759 tmp |= DC_BALANCE_RESET_VLV; 3760 switch (pipe) { 3761 case PIPE_A: 3762 tmp |= PIPE_A_SCRAMBLE_RESET; 3763 break; 3764 case PIPE_B: 3765 tmp |= PIPE_B_SCRAMBLE_RESET; 3766 break; 3767 case PIPE_C: 3768 tmp |= PIPE_C_SCRAMBLE_RESET; 3769 break; 3770 default: 3771 return -EINVAL; 3772 } 3773 I915_WRITE(PORT_DFT2_G4X, tmp); 3774 } 3775 3776 return 0; 3777 } 3778 3779 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, 3780 enum pipe pipe, 3781 enum intel_pipe_crc_source *source, 3782 uint32_t *val) 3783 { 3784 struct drm_i915_private *dev_priv = dev->dev_private; 3785 bool need_stable_symbols = false; 3786 3787 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3788 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 3789 if (ret) 3790 return ret; 3791 } 3792 3793 switch (*source) { 3794 case INTEL_PIPE_CRC_SOURCE_PIPE: 3795 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; 3796 break; 3797 case INTEL_PIPE_CRC_SOURCE_TV: 3798 if (!SUPPORTS_TV(dev)) 3799 return -EINVAL; 3800 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; 3801 break; 3802 case INTEL_PIPE_CRC_SOURCE_DP_B: 3803 if (!IS_G4X(dev)) 3804 return -EINVAL; 3805 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; 3806 need_stable_symbols = true; 3807 break; 3808 case INTEL_PIPE_CRC_SOURCE_DP_C: 3809 if (!IS_G4X(dev)) 3810 return -EINVAL; 3811 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; 3812 need_stable_symbols = true; 3813 break; 3814 case INTEL_PIPE_CRC_SOURCE_DP_D: 3815 if (!IS_G4X(dev)) 3816 return -EINVAL; 3817 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; 3818 need_stable_symbols = true; 3819 break; 3820 case INTEL_PIPE_CRC_SOURCE_NONE: 3821 *val = 0; 3822 break; 3823 default: 3824 return -EINVAL; 3825 } 3826 3827 /* 3828 * When the pipe CRC tap point is after the transcoders we need 3829 * to tweak symbol-level features to produce a deterministic series of 3830 * symbols for a given frame. 
We need to reset those features only once 3831 * a frame (instead of every nth symbol): 3832 * - DC-balance: used to ensure a better clock recovery from the data 3833 * link (SDVO) 3834 * - DisplayPort scrambling: used for EMI reduction 3835 */ 3836 if (need_stable_symbols) { 3837 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3838 3839 WARN_ON(!IS_G4X(dev)); 3840 3841 I915_WRITE(PORT_DFT_I9XX, 3842 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); 3843 3844 if (pipe == PIPE_A) 3845 tmp |= PIPE_A_SCRAMBLE_RESET; 3846 else 3847 tmp |= PIPE_B_SCRAMBLE_RESET; 3848 3849 I915_WRITE(PORT_DFT2_G4X, tmp); 3850 } 3851 3852 return 0; 3853 } 3854 3855 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, 3856 enum pipe pipe) 3857 { 3858 struct drm_i915_private *dev_priv = dev->dev_private; 3859 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3860 3861 switch (pipe) { 3862 case PIPE_A: 3863 tmp &= ~PIPE_A_SCRAMBLE_RESET; 3864 break; 3865 case PIPE_B: 3866 tmp &= ~PIPE_B_SCRAMBLE_RESET; 3867 break; 3868 case PIPE_C: 3869 tmp &= ~PIPE_C_SCRAMBLE_RESET; 3870 break; 3871 default: 3872 return; 3873 } 3874 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) 3875 tmp &= ~DC_BALANCE_RESET_VLV; 3876 I915_WRITE(PORT_DFT2_G4X, tmp); 3877 3878 } 3879 3880 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, 3881 enum pipe pipe) 3882 { 3883 struct drm_i915_private *dev_priv = dev->dev_private; 3884 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3885 3886 if (pipe == PIPE_A) 3887 tmp &= ~PIPE_A_SCRAMBLE_RESET; 3888 else 3889 tmp &= ~PIPE_B_SCRAMBLE_RESET; 3890 I915_WRITE(PORT_DFT2_G4X, tmp); 3891 3892 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { 3893 I915_WRITE(PORT_DFT_I9XX, 3894 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); 3895 } 3896 } 3897 3898 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 3899 uint32_t *val) 3900 { 3901 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3902 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3903 3904 switch (*source) { 3905 case INTEL_PIPE_CRC_SOURCE_PLANE1: 3906 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; 3907 break; 3908 case INTEL_PIPE_CRC_SOURCE_PLANE2: 3909 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; 3910 break; 3911 case INTEL_PIPE_CRC_SOURCE_PIPE: 3912 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; 3913 break; 3914 case INTEL_PIPE_CRC_SOURCE_NONE: 3915 *val = 0; 3916 break; 3917 default: 3918 return -EINVAL; 3919 } 3920 3921 return 0; 3922 } 3923 3924 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable) 3925 { 3926 struct drm_i915_private *dev_priv = dev->dev_private; 3927 struct intel_crtc *crtc = 3928 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); 3929 struct intel_crtc_state *pipe_config; 3930 struct drm_atomic_state *state; 3931 int ret = 0; 3932 3933 drm_modeset_lock_all(dev); 3934 state = drm_atomic_state_alloc(dev); 3935 if (!state) { 3936 ret = -ENOMEM; 3937 goto out; 3938 } 3939 3940 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base); 3941 pipe_config = intel_atomic_get_crtc_state(state, crtc); 3942 if (IS_ERR(pipe_config)) { 3943 ret = PTR_ERR(pipe_config); 3944 goto out; 3945 } 3946 3947 pipe_config->pch_pfit.force_thru = enable; 3948 if (pipe_config->cpu_transcoder == TRANSCODER_EDP && 3949 pipe_config->pch_pfit.enabled != enable) 3950 pipe_config->base.connectors_changed = true; 3951 3952 ret = drm_atomic_commit(state); 3953 out: 3954 drm_modeset_unlock_all(dev); 3955 WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret); 3956 if (ret) 3957 drm_atomic_state_free(state); 3958 } 3959 3960 
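/*
 * End-to-end usage sketch for the CRC debugfs interface built from the
 * pieces above (a sketch only: paths assume debugfs mounted at
 * /sys/kernel/debug and DRM minor 0, adjust for the actual system):
 *
 *	# pick a source; parsed by display_crc_ctl_parse() and applied in
 *	# pipe_crc_set_source() below
 *	echo "pipe A pf" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *
 *	# each read() returns whole PIPE_CRC_LINE_LEN-byte lines of the form
 *	# "<frame> <crc0> <crc1> <crc2> <crc3> <crc4>"
 *	cat /sys/kernel/debug/dri/0/i915_pipe_A_crc
 *
 *	# stop collection and free the entries buffer
 *	echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 */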
static int ivb_pipe_crc_ctl_reg(struct drm_device *dev, 3961 enum pipe pipe, 3962 enum intel_pipe_crc_source *source, 3963 uint32_t *val) 3964 { 3965 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3966 *source = INTEL_PIPE_CRC_SOURCE_PF; 3967 3968 switch (*source) { 3969 case INTEL_PIPE_CRC_SOURCE_PLANE1: 3970 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; 3971 break; 3972 case INTEL_PIPE_CRC_SOURCE_PLANE2: 3973 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; 3974 break; 3975 case INTEL_PIPE_CRC_SOURCE_PF: 3976 if (IS_HASWELL(dev) && pipe == PIPE_A) 3977 hsw_trans_edp_pipe_A_crc_wa(dev, true); 3978 3979 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; 3980 break; 3981 case INTEL_PIPE_CRC_SOURCE_NONE: 3982 *val = 0; 3983 break; 3984 default: 3985 return -EINVAL; 3986 } 3987 3988 return 0; 3989 } 3990 3991 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, 3992 enum intel_pipe_crc_source source) 3993 { 3994 struct drm_i915_private *dev_priv = dev->dev_private; 3995 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3996 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, 3997 pipe)); 3998 u32 val = 0; /* shut up gcc */ 3999 int ret; 4000 4001 if (pipe_crc->source == source) 4002 return 0; 4003 4004 /* forbid changing the source without going back to 'none' */ 4005 if (pipe_crc->source && source) 4006 return -EINVAL; 4007 4008 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) { 4009 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); 4010 return -EIO; 4011 } 4012 4013 if (IS_GEN2(dev)) 4014 ret = i8xx_pipe_crc_ctl_reg(&source, &val); 4015 else if (INTEL_INFO(dev)->gen < 5) 4016 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4017 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4018 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4019 else if (IS_GEN5(dev) || IS_GEN6(dev)) 4020 ret = ilk_pipe_crc_ctl_reg(&source, &val); 4021 else 4022 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4023 4024 if (ret != 0) 4025 return ret; 4026 4027 /* none -> real source transition */ 4028 if (source) { 4029 struct intel_pipe_crc_entry *entries; 4030 4031 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", 4032 pipe_name(pipe), pipe_crc_source_name(source)); 4033 4034 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, 4035 sizeof(pipe_crc->entries[0]), 4036 GFP_KERNEL); 4037 if (!entries) 4038 return -ENOMEM; 4039 4040 /* 4041 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 4042 * enabled and disabled dynamically based on package C states, 4043 * user space can't make reliable use of the CRCs, so let's just 4044 * completely disable it. 
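		 * The matching hsw_enable_ips() call in the "real source ->
		 * none" transition further down re-enables IPS once CRC
		 * collection stops.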
4045 */ 4046 hsw_disable_ips(crtc); 4047 4048 spin_lock_irq(&pipe_crc->lock); 4049 kfree(pipe_crc->entries); 4050 pipe_crc->entries = entries; 4051 pipe_crc->head = 0; 4052 pipe_crc->tail = 0; 4053 spin_unlock_irq(&pipe_crc->lock); 4054 } 4055 4056 pipe_crc->source = source; 4057 4058 I915_WRITE(PIPE_CRC_CTL(pipe), val); 4059 POSTING_READ(PIPE_CRC_CTL(pipe)); 4060 4061 /* real source -> none transition */ 4062 if (source == INTEL_PIPE_CRC_SOURCE_NONE) { 4063 struct intel_pipe_crc_entry *entries; 4064 struct intel_crtc *crtc = 4065 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 4066 4067 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", 4068 pipe_name(pipe)); 4069 4070 drm_modeset_lock(&crtc->base.mutex, NULL); 4071 if (crtc->base.state->active) 4072 intel_wait_for_vblank(dev, pipe); 4073 drm_modeset_unlock(&crtc->base.mutex); 4074 4075 spin_lock_irq(&pipe_crc->lock); 4076 entries = pipe_crc->entries; 4077 pipe_crc->entries = NULL; 4078 pipe_crc->head = 0; 4079 pipe_crc->tail = 0; 4080 spin_unlock_irq(&pipe_crc->lock); 4081 4082 kfree(entries); 4083 4084 if (IS_G4X(dev)) 4085 g4x_undo_pipe_scramble_reset(dev, pipe); 4086 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4087 vlv_undo_pipe_scramble_reset(dev, pipe); 4088 else if (IS_HASWELL(dev) && pipe == PIPE_A) 4089 hsw_trans_edp_pipe_A_crc_wa(dev, false); 4090 4091 hsw_enable_ips(crtc); 4092 } 4093 4094 return 0; 4095 } 4096 4097 /* 4098 * Parse pipe CRC command strings: 4099 * command: wsp* object wsp+ name wsp+ source wsp* 4100 * object: 'pipe' 4101 * name: (A | B | C) 4102 * source: (none | plane1 | plane2 | pf) 4103 * wsp: (#0x20 | #0x9 | #0xA)+ 4104 * 4105 * eg.: 4106 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A 4107 * "pipe A none" -> Stop CRC 4108 */ 4109 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) 4110 { 4111 int n_words = 0; 4112 4113 while (*buf) { 4114 char *end; 4115 4116 /* skip leading white space */ 4117 buf = skip_spaces(buf); 4118 if (!*buf) 4119 break; /* end of buffer */ 4120 4121 /* find end of word */ 4122 for (end = buf; *end && !isspace(*end); end++) 4123 ; 4124 4125 if (n_words == max_words) { 4126 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", 4127 max_words); 4128 return -EINVAL; /* ran out of words[] before bytes */ 4129 } 4130 4131 if (*end) 4132 *end++ = '\0'; 4133 words[n_words++] = buf; 4134 buf = end; 4135 } 4136 4137 return n_words; 4138 } 4139 4140 enum intel_pipe_crc_object { 4141 PIPE_CRC_OBJECT_PIPE, 4142 }; 4143 4144 static const char * const pipe_crc_objects[] = { 4145 "pipe", 4146 }; 4147 4148 static int 4149 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) 4150 { 4151 int i; 4152 4153 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) 4154 if (!strcmp(buf, pipe_crc_objects[i])) { 4155 *o = i; 4156 return 0; 4157 } 4158 4159 return -EINVAL; 4160 } 4161 4162 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) 4163 { 4164 const char name = buf[0]; 4165 4166 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) 4167 return -EINVAL; 4168 4169 *pipe = name - 'A'; 4170 4171 return 0; 4172 } 4173 4174 static int 4175 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) 4176 { 4177 int i; 4178 4179 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) 4180 if (!strcmp(buf, pipe_crc_sources[i])) { 4181 *s = i; 4182 return 0; 4183 } 4184 4185 return -EINVAL; 4186 } 4187 4188 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len) 4189 { 4190 #define N_WORDS 
3 4191 int n_words; 4192 char *words[N_WORDS]; 4193 enum pipe pipe; 4194 enum intel_pipe_crc_object object; 4195 enum intel_pipe_crc_source source; 4196 4197 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); 4198 if (n_words != N_WORDS) { 4199 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", 4200 N_WORDS); 4201 return -EINVAL; 4202 } 4203 4204 if (display_crc_ctl_parse_object(words[0], &object) < 0) { 4205 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); 4206 return -EINVAL; 4207 } 4208 4209 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { 4210 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); 4211 return -EINVAL; 4212 } 4213 4214 if (display_crc_ctl_parse_source(words[2], &source) < 0) { 4215 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); 4216 return -EINVAL; 4217 } 4218 4219 return pipe_crc_set_source(dev, pipe, source); 4220 } 4221 4222 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, 4223 size_t len, loff_t *offp) 4224 { 4225 struct seq_file *m = file->private_data; 4226 struct drm_device *dev = m->private; 4227 char *tmpbuf; 4228 int ret; 4229 4230 if (len == 0) 4231 return 0; 4232 4233 if (len > PAGE_SIZE - 1) { 4234 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", 4235 PAGE_SIZE); 4236 return -E2BIG; 4237 } 4238 4239 tmpbuf = kmalloc(len + 1, GFP_KERNEL); 4240 if (!tmpbuf) 4241 return -ENOMEM; 4242 4243 if (copy_from_user(tmpbuf, ubuf, len)) { 4244 ret = -EFAULT; 4245 goto out; 4246 } 4247 tmpbuf[len] = '\0'; 4248 4249 ret = display_crc_ctl_parse(dev, tmpbuf, len); 4250 4251 out: 4252 kfree(tmpbuf); 4253 if (ret < 0) 4254 return ret; 4255 4256 *offp += len; 4257 return len; 4258 } 4259 4260 static const struct file_operations i915_display_crc_ctl_fops = { 4261 .owner = THIS_MODULE, 4262 .open = display_crc_ctl_open, 4263 .read = seq_read, 4264 .llseek = seq_lseek, 4265 .release = single_release, 4266 .write = display_crc_ctl_write 4267 }; 4268 4269 static ssize_t i915_displayport_test_active_write(struct file *file, 4270 const char __user *ubuf, 4271 size_t len, loff_t *offp) 4272 { 4273 char *input_buffer; 4274 int status = 0; 4275 struct drm_device *dev; 4276 struct drm_connector *connector; 4277 struct list_head *connector_list; 4278 struct intel_dp *intel_dp; 4279 int val = 0; 4280 4281 dev = ((struct seq_file *)file->private_data)->private; 4282 4283 connector_list = &dev->mode_config.connector_list; 4284 4285 if (len == 0) 4286 return 0; 4287 4288 input_buffer = kmalloc(len + 1, GFP_KERNEL); 4289 if (!input_buffer) 4290 return -ENOMEM; 4291 4292 if (copy_from_user(input_buffer, ubuf, len)) { 4293 status = -EFAULT; 4294 goto out; 4295 } 4296 4297 input_buffer[len] = '\0'; 4298 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); 4299 4300 list_for_each_entry(connector, connector_list, head) { 4301 4302 if (connector->connector_type != 4303 DRM_MODE_CONNECTOR_DisplayPort) 4304 continue; 4305 4306 if (connector->status == connector_status_connected && 4307 connector->encoder != NULL) { 4308 intel_dp = enc_to_intel_dp(connector->encoder); 4309 status = kstrtoint(input_buffer, 10, &val); 4310 if (status < 0) 4311 goto out; 4312 DRM_DEBUG_DRIVER("Got %d for test active\n", val); 4313 /* To prevent erroneous activation of the compliance 4314 * testing code, only accept an actual value of 1 here 4315 */ 4316 if (val == 1) 4317 intel_dp->compliance_test_active = 1; 4318 else 4319 intel_dp->compliance_test_active = 0; 4320 } 4321 } 4322 out: 4323 kfree(input_buffer); 4324 if (status < 0) 4325 
return status; 4326 4327 *offp += len; 4328 return len; 4329 } 4330 4331 static int i915_displayport_test_active_show(struct seq_file *m, void *data) 4332 { 4333 struct drm_device *dev = m->private; 4334 struct drm_connector *connector; 4335 struct list_head *connector_list = &dev->mode_config.connector_list; 4336 struct intel_dp *intel_dp; 4337 4338 list_for_each_entry(connector, connector_list, head) { 4339 4340 if (connector->connector_type != 4341 DRM_MODE_CONNECTOR_DisplayPort) 4342 continue; 4343 4344 if (connector->status == connector_status_connected && 4345 connector->encoder != NULL) { 4346 intel_dp = enc_to_intel_dp(connector->encoder); 4347 if (intel_dp->compliance_test_active) 4348 seq_puts(m, "1"); 4349 else 4350 seq_puts(m, "0"); 4351 } else 4352 seq_puts(m, "0"); 4353 } 4354 4355 return 0; 4356 } 4357 4358 static int i915_displayport_test_active_open(struct inode *inode, 4359 struct file *file) 4360 { 4361 struct drm_device *dev = inode->i_private; 4362 4363 return single_open(file, i915_displayport_test_active_show, dev); 4364 } 4365 4366 static const struct file_operations i915_displayport_test_active_fops = { 4367 .owner = THIS_MODULE, 4368 .open = i915_displayport_test_active_open, 4369 .read = seq_read, 4370 .llseek = seq_lseek, 4371 .release = single_release, 4372 .write = i915_displayport_test_active_write 4373 }; 4374 4375 static int i915_displayport_test_data_show(struct seq_file *m, void *data) 4376 { 4377 struct drm_device *dev = m->private; 4378 struct drm_connector *connector; 4379 struct list_head *connector_list = &dev->mode_config.connector_list; 4380 struct intel_dp *intel_dp; 4381 4382 list_for_each_entry(connector, connector_list, head) { 4383 4384 if (connector->connector_type != 4385 DRM_MODE_CONNECTOR_DisplayPort) 4386 continue; 4387 4388 if (connector->status == connector_status_connected && 4389 connector->encoder != NULL) { 4390 intel_dp = enc_to_intel_dp(connector->encoder); 4391 seq_printf(m, "%lx", intel_dp->compliance_test_data); 4392 } else 4393 seq_puts(m, "0"); 4394 } 4395 4396 return 0; 4397 } 4398 static int i915_displayport_test_data_open(struct inode *inode, 4399 struct file *file) 4400 { 4401 struct drm_device *dev = inode->i_private; 4402 4403 return single_open(file, i915_displayport_test_data_show, dev); 4404 } 4405 4406 static const struct file_operations i915_displayport_test_data_fops = { 4407 .owner = THIS_MODULE, 4408 .open = i915_displayport_test_data_open, 4409 .read = seq_read, 4410 .llseek = seq_lseek, 4411 .release = single_release 4412 }; 4413 4414 static int i915_displayport_test_type_show(struct seq_file *m, void *data) 4415 { 4416 struct drm_device *dev = m->private; 4417 struct drm_connector *connector; 4418 struct list_head *connector_list = &dev->mode_config.connector_list; 4419 struct intel_dp *intel_dp; 4420 4421 list_for_each_entry(connector, connector_list, head) { 4422 4423 if (connector->connector_type != 4424 DRM_MODE_CONNECTOR_DisplayPort) 4425 continue; 4426 4427 if (connector->status == connector_status_connected && 4428 connector->encoder != NULL) { 4429 intel_dp = enc_to_intel_dp(connector->encoder); 4430 seq_printf(m, "%02lx", intel_dp->compliance_test_type); 4431 } else 4432 seq_puts(m, "0"); 4433 } 4434 4435 return 0; 4436 } 4437 4438 static int i915_displayport_test_type_open(struct inode *inode, 4439 struct file *file) 4440 { 4441 struct drm_device *dev = inode->i_private; 4442 4443 return single_open(file, i915_displayport_test_type_show, dev); 4444 } 4445 4446 static const struct file_operations 
i915_displayport_test_type_fops = { 4447 .owner = THIS_MODULE, 4448 .open = i915_displayport_test_type_open, 4449 .read = seq_read, 4450 .llseek = seq_lseek, 4451 .release = single_release 4452 }; 4453 4454 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) 4455 { 4456 struct drm_device *dev = m->private; 4457 int level; 4458 int num_levels; 4459 4460 if (IS_CHERRYVIEW(dev)) 4461 num_levels = 3; 4462 else if (IS_VALLEYVIEW(dev)) 4463 num_levels = 1; 4464 else 4465 num_levels = ilk_wm_max_level(dev) + 1; 4466 4467 drm_modeset_lock_all(dev); 4468 4469 for (level = 0; level < num_levels; level++) { 4470 unsigned int latency = wm[level]; 4471 4472 /* 4473 * - WM1+ latency values in 0.5us units 4474 * - latencies are in us on gen9/vlv/chv 4475 */ 4476 if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) || 4477 IS_CHERRYVIEW(dev)) 4478 latency *= 10; 4479 else if (level > 0) 4480 latency *= 5; 4481 4482 seq_printf(m, "WM%d %u (%u.%u usec)\n", 4483 level, wm[level], latency / 10, latency % 10); 4484 } 4485 4486 drm_modeset_unlock_all(dev); 4487 } 4488 4489 static int pri_wm_latency_show(struct seq_file *m, void *data) 4490 { 4491 struct drm_device *dev = m->private; 4492 struct drm_i915_private *dev_priv = dev->dev_private; 4493 const uint16_t *latencies; 4494 4495 if (INTEL_INFO(dev)->gen >= 9) 4496 latencies = dev_priv->wm.skl_latency; 4497 else 4498 latencies = to_i915(dev)->wm.pri_latency; 4499 4500 wm_latency_show(m, latencies); 4501 4502 return 0; 4503 } 4504 4505 static int spr_wm_latency_show(struct seq_file *m, void *data) 4506 { 4507 struct drm_device *dev = m->private; 4508 struct drm_i915_private *dev_priv = dev->dev_private; 4509 const uint16_t *latencies; 4510 4511 if (INTEL_INFO(dev)->gen >= 9) 4512 latencies = dev_priv->wm.skl_latency; 4513 else 4514 latencies = to_i915(dev)->wm.spr_latency; 4515 4516 wm_latency_show(m, latencies); 4517 4518 return 0; 4519 } 4520 4521 static int cur_wm_latency_show(struct seq_file *m, void *data) 4522 { 4523 struct drm_device *dev = m->private; 4524 struct drm_i915_private *dev_priv = dev->dev_private; 4525 const uint16_t *latencies; 4526 4527 if (INTEL_INFO(dev)->gen >= 9) 4528 latencies = dev_priv->wm.skl_latency; 4529 else 4530 latencies = to_i915(dev)->wm.cur_latency; 4531 4532 wm_latency_show(m, latencies); 4533 4534 return 0; 4535 } 4536 4537 static int pri_wm_latency_open(struct inode *inode, struct file *file) 4538 { 4539 struct drm_device *dev = inode->i_private; 4540 4541 if (INTEL_INFO(dev)->gen < 5) 4542 return -ENODEV; 4543 4544 return single_open(file, pri_wm_latency_show, dev); 4545 } 4546 4547 static int spr_wm_latency_open(struct inode *inode, struct file *file) 4548 { 4549 struct drm_device *dev = inode->i_private; 4550 4551 if (HAS_GMCH_DISPLAY(dev)) 4552 return -ENODEV; 4553 4554 return single_open(file, spr_wm_latency_show, dev); 4555 } 4556 4557 static int cur_wm_latency_open(struct inode *inode, struct file *file) 4558 { 4559 struct drm_device *dev = inode->i_private; 4560 4561 if (HAS_GMCH_DISPLAY(dev)) 4562 return -ENODEV; 4563 4564 return single_open(file, cur_wm_latency_show, dev); 4565 } 4566 4567 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 4568 size_t len, loff_t *offp, uint16_t wm[8]) 4569 { 4570 struct seq_file *m = file->private_data; 4571 struct drm_device *dev = m->private; 4572 uint16_t new[8] = { 0 }; 4573 int num_levels; 4574 int level; 4575 int ret; 4576 char tmp[32]; 4577 4578 if (IS_CHERRYVIEW(dev)) 4579 num_levels = 3; 4580 else if (IS_VALLEYVIEW(dev)) 4581 
num_levels = 1; 4582 else 4583 num_levels = ilk_wm_max_level(dev) + 1; 4584 4585 if (len >= sizeof(tmp)) 4586 return -EINVAL; 4587 4588 if (copy_from_user(tmp, ubuf, len)) 4589 return -EFAULT; 4590 4591 tmp[len] = '\0'; 4592 4593 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 4594 &new[0], &new[1], &new[2], &new[3], 4595 &new[4], &new[5], &new[6], &new[7]); 4596 if (ret != num_levels) 4597 return -EINVAL; 4598 4599 drm_modeset_lock_all(dev); 4600 4601 for (level = 0; level < num_levels; level++) 4602 wm[level] = new[level]; 4603 4604 drm_modeset_unlock_all(dev); 4605 4606 return len; 4607 } 4608 4609 4610 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 4611 size_t len, loff_t *offp) 4612 { 4613 struct seq_file *m = file->private_data; 4614 struct drm_device *dev = m->private; 4615 struct drm_i915_private *dev_priv = dev->dev_private; 4616 uint16_t *latencies; 4617 4618 if (INTEL_INFO(dev)->gen >= 9) 4619 latencies = dev_priv->wm.skl_latency; 4620 else 4621 latencies = to_i915(dev)->wm.pri_latency; 4622 4623 return wm_latency_write(file, ubuf, len, offp, latencies); 4624 } 4625 4626 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 4627 size_t len, loff_t *offp) 4628 { 4629 struct seq_file *m = file->private_data; 4630 struct drm_device *dev = m->private; 4631 struct drm_i915_private *dev_priv = dev->dev_private; 4632 uint16_t *latencies; 4633 4634 if (INTEL_INFO(dev)->gen >= 9) 4635 latencies = dev_priv->wm.skl_latency; 4636 else 4637 latencies = to_i915(dev)->wm.spr_latency; 4638 4639 return wm_latency_write(file, ubuf, len, offp, latencies); 4640 } 4641 4642 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 4643 size_t len, loff_t *offp) 4644 { 4645 struct seq_file *m = file->private_data; 4646 struct drm_device *dev = m->private; 4647 struct drm_i915_private *dev_priv = dev->dev_private; 4648 uint16_t *latencies; 4649 4650 if (INTEL_INFO(dev)->gen >= 9) 4651 latencies = dev_priv->wm.skl_latency; 4652 else 4653 latencies = to_i915(dev)->wm.cur_latency; 4654 4655 return wm_latency_write(file, ubuf, len, offp, latencies); 4656 } 4657 4658 static const struct file_operations i915_pri_wm_latency_fops = { 4659 .owner = THIS_MODULE, 4660 .open = pri_wm_latency_open, 4661 .read = seq_read, 4662 .llseek = seq_lseek, 4663 .release = single_release, 4664 .write = pri_wm_latency_write 4665 }; 4666 4667 static const struct file_operations i915_spr_wm_latency_fops = { 4668 .owner = THIS_MODULE, 4669 .open = spr_wm_latency_open, 4670 .read = seq_read, 4671 .llseek = seq_lseek, 4672 .release = single_release, 4673 .write = spr_wm_latency_write 4674 }; 4675 4676 static const struct file_operations i915_cur_wm_latency_fops = { 4677 .owner = THIS_MODULE, 4678 .open = cur_wm_latency_open, 4679 .read = seq_read, 4680 .llseek = seq_lseek, 4681 .release = single_release, 4682 .write = cur_wm_latency_write 4683 }; 4684 4685 static int 4686 i915_wedged_get(void *data, u64 *val) 4687 { 4688 struct drm_device *dev = data; 4689 struct drm_i915_private *dev_priv = dev->dev_private; 4690 4691 *val = atomic_read(&dev_priv->gpu_error.reset_counter); 4692 4693 return 0; 4694 } 4695 4696 static int 4697 i915_wedged_set(void *data, u64 val) 4698 { 4699 struct drm_device *dev = data; 4700 struct drm_i915_private *dev_priv = dev->dev_private; 4701 4702 /* 4703 * There is no safeguard against this debugfs entry colliding 4704 * with the hangcheck calling same i915_handle_error() in 4705 * parallel, causing an explosion. 
For now we assume that the 4706 * test harness is responsible enough not to inject gpu hangs 4707 * while it is writing to 'i915_wedged' 4708 */ 4709 4710 if (i915_reset_in_progress(&dev_priv->gpu_error)) 4711 return -EAGAIN; 4712 4713 intel_runtime_pm_get(dev_priv); 4714 4715 i915_handle_error(dev, val, 4716 "Manually setting wedged to %llu", val); 4717 4718 intel_runtime_pm_put(dev_priv); 4719 4720 return 0; 4721 } 4722 4723 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 4724 i915_wedged_get, i915_wedged_set, 4725 "%llu\n"); 4726 4727 static int 4728 i915_ring_stop_get(void *data, u64 *val) 4729 { 4730 struct drm_device *dev = data; 4731 struct drm_i915_private *dev_priv = dev->dev_private; 4732 4733 *val = dev_priv->gpu_error.stop_rings; 4734 4735 return 0; 4736 } 4737 4738 static int 4739 i915_ring_stop_set(void *data, u64 val) 4740 { 4741 struct drm_device *dev = data; 4742 struct drm_i915_private *dev_priv = dev->dev_private; 4743 int ret; 4744 4745 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 4746 4747 ret = mutex_lock_interruptible(&dev->struct_mutex); 4748 if (ret) 4749 return ret; 4750 4751 dev_priv->gpu_error.stop_rings = val; 4752 mutex_unlock(&dev->struct_mutex); 4753 4754 return 0; 4755 } 4756 4757 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 4758 i915_ring_stop_get, i915_ring_stop_set, 4759 "0x%08llx\n"); 4760 4761 static int 4762 i915_ring_missed_irq_get(void *data, u64 *val) 4763 { 4764 struct drm_device *dev = data; 4765 struct drm_i915_private *dev_priv = dev->dev_private; 4766 4767 *val = dev_priv->gpu_error.missed_irq_rings; 4768 return 0; 4769 } 4770 4771 static int 4772 i915_ring_missed_irq_set(void *data, u64 val) 4773 { 4774 struct drm_device *dev = data; 4775 struct drm_i915_private *dev_priv = dev->dev_private; 4776 int ret; 4777 4778 /* Lock against concurrent debugfs callers */ 4779 ret = mutex_lock_interruptible(&dev->struct_mutex); 4780 if (ret) 4781 return ret; 4782 dev_priv->gpu_error.missed_irq_rings = val; 4783 mutex_unlock(&dev->struct_mutex); 4784 4785 return 0; 4786 } 4787 4788 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 4789 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 4790 "0x%08llx\n"); 4791 4792 static int 4793 i915_ring_test_irq_get(void *data, u64 *val) 4794 { 4795 struct drm_device *dev = data; 4796 struct drm_i915_private *dev_priv = dev->dev_private; 4797 4798 *val = dev_priv->gpu_error.test_irq_rings; 4799 4800 return 0; 4801 } 4802 4803 static int 4804 i915_ring_test_irq_set(void *data, u64 val) 4805 { 4806 struct drm_device *dev = data; 4807 struct drm_i915_private *dev_priv = dev->dev_private; 4808 int ret; 4809 4810 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 4811 4812 /* Lock against concurrent debugfs callers */ 4813 ret = mutex_lock_interruptible(&dev->struct_mutex); 4814 if (ret) 4815 return ret; 4816 4817 dev_priv->gpu_error.test_irq_rings = val; 4818 mutex_unlock(&dev->struct_mutex); 4819 4820 return 0; 4821 } 4822 4823 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 4824 i915_ring_test_irq_get, i915_ring_test_irq_set, 4825 "0x%08llx\n"); 4826 4827 #define DROP_UNBOUND 0x1 4828 #define DROP_BOUND 0x2 4829 #define DROP_RETIRE 0x4 4830 #define DROP_ACTIVE 0x8 4831 #define DROP_ALL (DROP_UNBOUND | \ 4832 DROP_BOUND | \ 4833 DROP_RETIRE | \ 4834 DROP_ACTIVE) 4835 static int 4836 i915_drop_caches_get(void *data, u64 *val) 4837 { 4838 *val = DROP_ALL; 4839 4840 return 0; 4841 } 4842 4843 static int 4844 i915_drop_caches_set(void *data, u64 val) 4845 { 4846 struct drm_device *dev = data; 4847 struct 
drm_i915_private *dev_priv = dev->dev_private; 4848 int ret; 4849 4850 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 4851 4852 /* No need to check and wait for gpu resets, only libdrm auto-restarts 4853 * on ioctls on -EAGAIN. */ 4854 ret = mutex_lock_interruptible(&dev->struct_mutex); 4855 if (ret) 4856 return ret; 4857 4858 if (val & DROP_ACTIVE) { 4859 ret = i915_gpu_idle(dev); 4860 if (ret) 4861 goto unlock; 4862 } 4863 4864 if (val & (DROP_RETIRE | DROP_ACTIVE)) 4865 i915_gem_retire_requests(dev); 4866 4867 if (val & DROP_BOUND) 4868 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); 4869 4870 if (val & DROP_UNBOUND) 4871 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND); 4872 4873 unlock: 4874 mutex_unlock(&dev->struct_mutex); 4875 4876 return ret; 4877 } 4878 4879 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 4880 i915_drop_caches_get, i915_drop_caches_set, 4881 "0x%08llx\n"); 4882 4883 static int 4884 i915_max_freq_get(void *data, u64 *val) 4885 { 4886 struct drm_device *dev = data; 4887 struct drm_i915_private *dev_priv = dev->dev_private; 4888 int ret; 4889 4890 if (INTEL_INFO(dev)->gen < 6) 4891 return -ENODEV; 4892 4893 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4894 4895 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4896 if (ret) 4897 return ret; 4898 4899 *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); 4900 mutex_unlock(&dev_priv->rps.hw_lock); 4901 4902 return 0; 4903 } 4904 4905 static int 4906 i915_max_freq_set(void *data, u64 val) 4907 { 4908 struct drm_device *dev = data; 4909 struct drm_i915_private *dev_priv = dev->dev_private; 4910 u32 hw_max, hw_min; 4911 int ret; 4912 4913 if (INTEL_INFO(dev)->gen < 6) 4914 return -ENODEV; 4915 4916 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4917 4918 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 4919 4920 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4921 if (ret) 4922 return ret; 4923 4924 /* 4925 * Turbo will still be enabled, but won't go above the set value. 
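	 * Values are exchanged with user space in MHz and translated to a
	 * hardware opcode via intel_freq_opcode(); e.g., assuming the usual
	 * debugfs layout, something like
	 *	echo 450 > /sys/kernel/debug/dri/0/i915_max_freq
	 * would cap the softlimit at 450 MHz.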
4926 */ 4927 val = intel_freq_opcode(dev_priv, val); 4928 4929 hw_max = dev_priv->rps.max_freq; 4930 hw_min = dev_priv->rps.min_freq; 4931 4932 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { 4933 mutex_unlock(&dev_priv->rps.hw_lock); 4934 return -EINVAL; 4935 } 4936 4937 dev_priv->rps.max_freq_softlimit = val; 4938 4939 intel_set_rps(dev, val); 4940 4941 mutex_unlock(&dev_priv->rps.hw_lock); 4942 4943 return 0; 4944 } 4945 4946 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 4947 i915_max_freq_get, i915_max_freq_set, 4948 "%llu\n"); 4949 4950 static int 4951 i915_min_freq_get(void *data, u64 *val) 4952 { 4953 struct drm_device *dev = data; 4954 struct drm_i915_private *dev_priv = dev->dev_private; 4955 int ret; 4956 4957 if (INTEL_INFO(dev)->gen < 6) 4958 return -ENODEV; 4959 4960 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4961 4962 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4963 if (ret) 4964 return ret; 4965 4966 *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); 4967 mutex_unlock(&dev_priv->rps.hw_lock); 4968 4969 return 0; 4970 } 4971 4972 static int 4973 i915_min_freq_set(void *data, u64 val) 4974 { 4975 struct drm_device *dev = data; 4976 struct drm_i915_private *dev_priv = dev->dev_private; 4977 u32 hw_max, hw_min; 4978 int ret; 4979 4980 if (INTEL_INFO(dev)->gen < 6) 4981 return -ENODEV; 4982 4983 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4984 4985 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 4986 4987 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4988 if (ret) 4989 return ret; 4990 4991 /* 4992 * Turbo will still be enabled, but won't go below the set value. 4993 */ 4994 val = intel_freq_opcode(dev_priv, val); 4995 4996 hw_max = dev_priv->rps.max_freq; 4997 hw_min = dev_priv->rps.min_freq; 4998 4999 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { 5000 mutex_unlock(&dev_priv->rps.hw_lock); 5001 return -EINVAL; 5002 } 5003 5004 dev_priv->rps.min_freq_softlimit = val; 5005 5006 intel_set_rps(dev, val); 5007 5008 mutex_unlock(&dev_priv->rps.hw_lock); 5009 5010 return 0; 5011 } 5012 5013 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 5014 i915_min_freq_get, i915_min_freq_set, 5015 "%llu\n"); 5016 5017 static int 5018 i915_cache_sharing_get(void *data, u64 *val) 5019 { 5020 struct drm_device *dev = data; 5021 struct drm_i915_private *dev_priv = dev->dev_private; 5022 u32 snpcr; 5023 int ret; 5024 5025 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 5026 return -ENODEV; 5027 5028 ret = mutex_lock_interruptible(&dev->struct_mutex); 5029 if (ret) 5030 return ret; 5031 intel_runtime_pm_get(dev_priv); 5032 5033 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 5034 5035 intel_runtime_pm_put(dev_priv); 5036 mutex_unlock(&dev_priv->dev->struct_mutex); 5037 5038 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 5039 5040 return 0; 5041 } 5042 5043 static int 5044 i915_cache_sharing_set(void *data, u64 val) 5045 { 5046 struct drm_device *dev = data; 5047 struct drm_i915_private *dev_priv = dev->dev_private; 5048 u32 snpcr; 5049 5050 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 5051 return -ENODEV; 5052 5053 if (val > 3) 5054 return -EINVAL; 5055 5056 intel_runtime_pm_get(dev_priv); 5057 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 5058 5059 /* Update the cache sharing policy here as well */ 5060 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 5061 snpcr &= ~GEN6_MBC_SNPCR_MASK; 5062 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 5063 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 
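	/*
	 * The read-modify-write above only touches the GEN6_MBC_SNPCR_MASK
	 * field: e.g. (illustrative) writing 2 through the debugfs file ends
	 * up as (2 << GEN6_MBC_SNPCR_SHIFT) in that field, with the rest of
	 * GEN6_MBCUNIT_SNPCR preserved.
	 */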
5064 5065 intel_runtime_pm_put(dev_priv); 5066 return 0; 5067 } 5068 5069 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 5070 i915_cache_sharing_get, i915_cache_sharing_set, 5071 "%llu\n"); 5072 5073 struct sseu_dev_status { 5074 unsigned int slice_total; 5075 unsigned int subslice_total; 5076 unsigned int subslice_per_slice; 5077 unsigned int eu_total; 5078 unsigned int eu_per_subslice; 5079 }; 5080 5081 static void cherryview_sseu_device_status(struct drm_device *dev, 5082 struct sseu_dev_status *stat) 5083 { 5084 struct drm_i915_private *dev_priv = dev->dev_private; 5085 int ss_max = 2; 5086 int ss; 5087 u32 sig1[ss_max], sig2[ss_max]; 5088 5089 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); 5090 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); 5091 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); 5092 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); 5093 5094 for (ss = 0; ss < ss_max; ss++) { 5095 unsigned int eu_cnt; 5096 5097 if (sig1[ss] & CHV_SS_PG_ENABLE) 5098 /* skip disabled subslice */ 5099 continue; 5100 5101 stat->slice_total = 1; 5102 stat->subslice_per_slice++; 5103 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + 5104 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + 5105 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + 5106 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2); 5107 stat->eu_total += eu_cnt; 5108 stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt); 5109 } 5110 stat->subslice_total = stat->subslice_per_slice; 5111 } 5112 5113 static void gen9_sseu_device_status(struct drm_device *dev, 5114 struct sseu_dev_status *stat) 5115 { 5116 struct drm_i915_private *dev_priv = dev->dev_private; 5117 int s_max = 3, ss_max = 4; 5118 int s, ss; 5119 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; 5120 5121 /* BXT has a single slice and at most 3 subslices. */ 5122 if (IS_BROXTON(dev)) { 5123 s_max = 1; 5124 ss_max = 3; 5125 } 5126 5127 for (s = 0; s < s_max; s++) { 5128 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s)); 5129 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s)); 5130 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s)); 5131 } 5132 5133 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | 5134 GEN9_PGCTL_SSA_EU19_ACK | 5135 GEN9_PGCTL_SSA_EU210_ACK | 5136 GEN9_PGCTL_SSA_EU311_ACK; 5137 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | 5138 GEN9_PGCTL_SSB_EU19_ACK | 5139 GEN9_PGCTL_SSB_EU210_ACK | 5140 GEN9_PGCTL_SSB_EU311_ACK; 5141 5142 for (s = 0; s < s_max; s++) { 5143 unsigned int ss_cnt = 0; 5144 5145 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) 5146 /* skip disabled slice */ 5147 continue; 5148 5149 stat->slice_total++; 5150 5151 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 5152 ss_cnt = INTEL_INFO(dev)->subslice_per_slice; 5153 5154 for (ss = 0; ss < ss_max; ss++) { 5155 unsigned int eu_cnt; 5156 5157 if (IS_BROXTON(dev) && 5158 !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) 5159 /* skip disabled subslice */ 5160 continue; 5161 5162 if (IS_BROXTON(dev)) 5163 ss_cnt++; 5164 5165 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & 5166 eu_mask[ss%2]); 5167 stat->eu_total += eu_cnt; 5168 stat->eu_per_subslice = max(stat->eu_per_subslice, 5169 eu_cnt); 5170 } 5171 5172 stat->subslice_total += ss_cnt; 5173 stat->subslice_per_slice = max(stat->subslice_per_slice, 5174 ss_cnt); 5175 } 5176 } 5177 5178 static void broadwell_sseu_device_status(struct drm_device *dev, 5179 struct sseu_dev_status *stat) 5180 { 5181 struct drm_i915_private *dev_priv = dev->dev_private; 5182 int s; 5183 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); 5184 5185 stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK); 5186 5187 if (stat->slice_total) { 5188 
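		/*
		 * BDW has a uniform topology per enabled slice, so the totals
		 * follow directly from the static device info; e.g. with
		 * illustrative numbers, 2 slices x 3 subslices x 8 EUs gives
		 * 48 EUs before the fused-off EUs are subtracted below.
		 */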
stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice; 5189 stat->subslice_total = stat->slice_total * 5190 stat->subslice_per_slice; 5191 stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice; 5192 stat->eu_total = stat->eu_per_subslice * stat->subslice_total; 5193 5194 /* subtract fused off EU(s) from enabled slice(s) */ 5195 for (s = 0; s < stat->slice_total; s++) { 5196 u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s]; 5197 5198 stat->eu_total -= hweight8(subslice_7eu); 5199 } 5200 } 5201 } 5202 5203 static int i915_sseu_status(struct seq_file *m, void *unused) 5204 { 5205 struct drm_info_node *node = (struct drm_info_node *) m->private; 5206 struct drm_device *dev = node->minor->dev; 5207 struct sseu_dev_status stat; 5208 5209 if (INTEL_INFO(dev)->gen < 8) 5210 return -ENODEV; 5211 5212 seq_puts(m, "SSEU Device Info\n"); 5213 seq_printf(m, " Available Slice Total: %u\n", 5214 INTEL_INFO(dev)->slice_total); 5215 seq_printf(m, " Available Subslice Total: %u\n", 5216 INTEL_INFO(dev)->subslice_total); 5217 seq_printf(m, " Available Subslice Per Slice: %u\n", 5218 INTEL_INFO(dev)->subslice_per_slice); 5219 seq_printf(m, " Available EU Total: %u\n", 5220 INTEL_INFO(dev)->eu_total); 5221 seq_printf(m, " Available EU Per Subslice: %u\n", 5222 INTEL_INFO(dev)->eu_per_subslice); 5223 seq_printf(m, " Has Slice Power Gating: %s\n", 5224 yesno(INTEL_INFO(dev)->has_slice_pg)); 5225 seq_printf(m, " Has Subslice Power Gating: %s\n", 5226 yesno(INTEL_INFO(dev)->has_subslice_pg)); 5227 seq_printf(m, " Has EU Power Gating: %s\n", 5228 yesno(INTEL_INFO(dev)->has_eu_pg)); 5229 5230 seq_puts(m, "SSEU Device Status\n"); 5231 memset(&stat, 0, sizeof(stat)); 5232 if (IS_CHERRYVIEW(dev)) { 5233 cherryview_sseu_device_status(dev, &stat); 5234 } else if (IS_BROADWELL(dev)) { 5235 broadwell_sseu_device_status(dev, &stat); 5236 } else if (INTEL_INFO(dev)->gen >= 9) { 5237 gen9_sseu_device_status(dev, &stat); 5238 } 5239 seq_printf(m, " Enabled Slice Total: %u\n", 5240 stat.slice_total); 5241 seq_printf(m, " Enabled Subslice Total: %u\n", 5242 stat.subslice_total); 5243 seq_printf(m, " Enabled Subslice Per Slice: %u\n", 5244 stat.subslice_per_slice); 5245 seq_printf(m, " Enabled EU Total: %u\n", 5246 stat.eu_total); 5247 seq_printf(m, " Enabled EU Per Subslice: %u\n", 5248 stat.eu_per_subslice); 5249 5250 return 0; 5251 } 5252 5253 static int i915_forcewake_open(struct inode *inode, struct file *file) 5254 { 5255 struct drm_device *dev = inode->i_private; 5256 struct drm_i915_private *dev_priv = dev->dev_private; 5257 5258 if (INTEL_INFO(dev)->gen < 6) 5259 return 0; 5260 5261 intel_runtime_pm_get(dev_priv); 5262 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5263 5264 return 0; 5265 } 5266 5267 static int i915_forcewake_release(struct inode *inode, struct file *file) 5268 { 5269 struct drm_device *dev = inode->i_private; 5270 struct drm_i915_private *dev_priv = dev->dev_private; 5271 5272 if (INTEL_INFO(dev)->gen < 6) 5273 return 0; 5274 5275 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5276 intel_runtime_pm_put(dev_priv); 5277 5278 return 0; 5279 } 5280 5281 static const struct file_operations i915_forcewake_fops = { 5282 .owner = THIS_MODULE, 5283 .open = i915_forcewake_open, 5284 .release = i915_forcewake_release, 5285 }; 5286 5287 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 5288 { 5289 struct drm_device *dev = minor->dev; 5290 struct dentry *ent; 5291 5292 ent = debugfs_create_file("i915_forcewake_user", 5293 S_IRUSR, 5294 root, dev, 5295 
&i915_forcewake_fops); 5296 if (!ent) 5297 return -ENOMEM; 5298 5299 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 5300 } 5301 5302 static int i915_debugfs_create(struct dentry *root, 5303 struct drm_minor *minor, 5304 const char *name, 5305 const struct file_operations *fops) 5306 { 5307 struct drm_device *dev = minor->dev; 5308 struct dentry *ent; 5309 5310 ent = debugfs_create_file(name, 5311 S_IRUGO | S_IWUSR, 5312 root, dev, 5313 fops); 5314 if (!ent) 5315 return -ENOMEM; 5316 5317 return drm_add_fake_info_node(minor, ent, fops); 5318 } 5319 5320 static const struct drm_info_list i915_debugfs_list[] = { 5321 {"i915_capabilities", i915_capabilities, 0}, 5322 {"i915_gem_objects", i915_gem_object_info, 0}, 5323 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 5324 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 5325 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 5326 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 5327 {"i915_gem_stolen", i915_gem_stolen_list_info }, 5328 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 5329 {"i915_gem_request", i915_gem_request_info, 0}, 5330 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 5331 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 5332 {"i915_gem_interrupt", i915_interrupt_info, 0}, 5333 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 5334 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 5335 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 5336 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, 5337 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, 5338 {"i915_guc_info", i915_guc_info, 0}, 5339 {"i915_guc_load_status", i915_guc_load_status_info, 0}, 5340 {"i915_guc_log_dump", i915_guc_log_dump, 0}, 5341 {"i915_frequency_info", i915_frequency_info, 0}, 5342 {"i915_hangcheck_info", i915_hangcheck_info, 0}, 5343 {"i915_drpc_info", i915_drpc_info, 0}, 5344 {"i915_emon_status", i915_emon_status, 0}, 5345 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 5346 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 5347 {"i915_fbc_status", i915_fbc_status, 0}, 5348 {"i915_ips_status", i915_ips_status, 0}, 5349 {"i915_sr_status", i915_sr_status, 0}, 5350 {"i915_opregion", i915_opregion, 0}, 5351 {"i915_vbt", i915_vbt, 0}, 5352 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 5353 {"i915_context_status", i915_context_status, 0}, 5354 {"i915_dump_lrc", i915_dump_lrc, 0}, 5355 {"i915_execlists", i915_execlists, 0}, 5356 {"i915_forcewake_domains", i915_forcewake_domains, 0}, 5357 {"i915_swizzle_info", i915_swizzle_info, 0}, 5358 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 5359 {"i915_llc", i915_llc, 0}, 5360 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 5361 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 5362 {"i915_energy_uJ", i915_energy_uJ, 0}, 5363 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 5364 {"i915_power_domain_info", i915_power_domain_info, 0}, 5365 {"i915_dmc_info", i915_dmc_info, 0}, 5366 {"i915_display_info", i915_display_info, 0}, 5367 {"i915_semaphore_status", i915_semaphore_status, 0}, 5368 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 5369 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 5370 {"i915_wa_registers", i915_wa_registers, 0}, 5371 {"i915_ddb_info", i915_ddb_info, 0}, 5372 {"i915_sseu_status", i915_sseu_status, 0}, 5373 {"i915_drrs_status", i915_drrs_status, 0}, 5374 {"i915_rps_boost_info", i915_rps_boost_info, 0}, 5375 }; 5376 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 5377 5378 static 
const struct i915_debugfs_files { 5379 const char *name; 5380 const struct file_operations *fops; 5381 } i915_debugfs_files[] = { 5382 {"i915_wedged", &i915_wedged_fops}, 5383 {"i915_max_freq", &i915_max_freq_fops}, 5384 {"i915_min_freq", &i915_min_freq_fops}, 5385 {"i915_cache_sharing", &i915_cache_sharing_fops}, 5386 {"i915_ring_stop", &i915_ring_stop_fops}, 5387 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 5388 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 5389 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 5390 {"i915_error_state", &i915_error_state_fops}, 5391 {"i915_next_seqno", &i915_next_seqno_fops}, 5392 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 5393 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 5394 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 5395 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 5396 {"i915_fbc_false_color", &i915_fbc_fc_fops}, 5397 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 5398 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 5399 {"i915_dp_test_active", &i915_displayport_test_active_fops} 5400 }; 5401 5402 void intel_display_crc_init(struct drm_device *dev) 5403 { 5404 struct drm_i915_private *dev_priv = dev->dev_private; 5405 enum pipe pipe; 5406 5407 for_each_pipe(dev_priv, pipe) { 5408 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 5409 5410 pipe_crc->opened = false; 5411 spin_lock_init(&pipe_crc->lock); 5412 init_waitqueue_head(&pipe_crc->wq); 5413 } 5414 } 5415 5416 int i915_debugfs_init(struct drm_minor *minor) 5417 { 5418 int ret, i; 5419 5420 ret = i915_forcewake_create(minor->debugfs_root, minor); 5421 if (ret) 5422 return ret; 5423 5424 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 5425 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); 5426 if (ret) 5427 return ret; 5428 } 5429 5430 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 5431 ret = i915_debugfs_create(minor->debugfs_root, minor, 5432 i915_debugfs_files[i].name, 5433 i915_debugfs_files[i].fops); 5434 if (ret) 5435 return ret; 5436 } 5437 5438 return drm_debugfs_create_files(i915_debugfs_list, 5439 I915_DEBUGFS_ENTRIES, 5440 minor->debugfs_root, minor); 5441 } 5442 5443 void i915_debugfs_cleanup(struct drm_minor *minor) 5444 { 5445 int i; 5446 5447 drm_debugfs_remove_files(i915_debugfs_list, 5448 I915_DEBUGFS_ENTRIES, minor); 5449 5450 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 5451 1, minor); 5452 5453 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 5454 struct drm_info_list *info_list = 5455 (struct drm_info_list *)&i915_pipe_crc_data[i]; 5456 5457 drm_debugfs_remove_files(info_list, 1, minor); 5458 } 5459 5460 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 5461 struct drm_info_list *info_list = 5462 (struct drm_info_list *) i915_debugfs_files[i].fops; 5463 5464 drm_debugfs_remove_files(info_list, 1, minor); 5465 } 5466 } 5467 5468 struct dpcd_block { 5469 /* DPCD dump start address. */ 5470 unsigned int offset; 5471 /* DPCD dump end address, inclusive. If unset, .size will be used. */ 5472 unsigned int end; 5473 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ 5474 size_t size; 5475 /* Only valid for eDP. 
*/ 5476 bool edp; 5477 }; 5478 5479 static const struct dpcd_block i915_dpcd_debug[] = { 5480 { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, 5481 { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, 5482 { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, 5483 { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, 5484 { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, 5485 { .offset = DP_SET_POWER }, 5486 { .offset = DP_EDP_DPCD_REV }, 5487 { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, 5488 { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, 5489 { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, 5490 }; 5491 5492 static int i915_dpcd_show(struct seq_file *m, void *data) 5493 { 5494 struct drm_connector *connector = m->private; 5495 struct intel_dp *intel_dp = 5496 enc_to_intel_dp(&intel_attached_encoder(connector)->base); 5497 uint8_t buf[16]; 5498 ssize_t err; 5499 int i; 5500 5501 if (connector->status != connector_status_connected) 5502 return -ENODEV; 5503 5504 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) { 5505 const struct dpcd_block *b = &i915_dpcd_debug[i]; 5506 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1); 5507 5508 if (b->edp && 5509 connector->connector_type != DRM_MODE_CONNECTOR_eDP) 5510 continue; 5511 5512 /* low tech for now */ 5513 if (WARN_ON(size > sizeof(buf))) 5514 continue; 5515 5516 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size); 5517 if (err <= 0) { 5518 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", 5519 size, b->offset, err); 5520 continue; 5521 } 5522 5523 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); 5524 } 5525 5526 return 0; 5527 } 5528 5529 static int i915_dpcd_open(struct inode *inode, struct file *file) 5530 { 5531 return single_open(file, i915_dpcd_show, inode->i_private); 5532 } 5533 5534 static const struct file_operations i915_dpcd_fops = { 5535 .owner = THIS_MODULE, 5536 .open = i915_dpcd_open, 5537 .read = seq_read, 5538 .llseek = seq_lseek, 5539 .release = single_release, 5540 }; 5541 5542 /** 5543 * i915_debugfs_connector_add - add i915 specific connector debugfs files 5544 * @connector: pointer to a registered drm_connector 5545 * 5546 * Cleanup will be done by drm_connector_unregister() through a call to 5547 * drm_debugfs_connector_remove(). 5548 * 5549 * Returns 0 on success, negative error codes on error. 5550 */ 5551 int i915_debugfs_connector_add(struct drm_connector *connector) 5552 { 5553 struct dentry *root = connector->debugfs_entry; 5554 5555 /* The connector must have been registered beforehands. */ 5556 if (!root) 5557 return -ENODEV; 5558 5559 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || 5560 connector->connector_type == DRM_MODE_CONNECTOR_eDP) 5561 debugfs_create_file("i915_dpcd", S_IRUGO, root, connector, 5562 &i915_dpcd_fops); 5563 5564 return 0; 5565 } 5566
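
/*
 * Usage sketch for the helper above (illustrative, not a verbatim caller):
 * the connector has to be registered first so that connector->debugfs_entry
 * is populated, e.g.
 *
 *	ret = drm_connector_register(connector);
 *	if (ret == 0)
 *		i915_debugfs_connector_add(connector);
 *
 * For DP/eDP connectors this adds an "i915_dpcd" file that dumps the blocks
 * listed in i915_dpcd_debug, one "offset: bytes" line per block, e.g.
 * (byte values made up):
 *
 *	0000: 12 14 c4 01 01 01 01 00 02 00 00 00 0e 00 84 00
 */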