/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return obj->active ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_display ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static inline char get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
}

static inline char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}
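/*
 * Legend for the single-character flags emitted by describe_obj() below,
 * as produced by the helpers above: '*' active, 'p' pinned for display,
 * 'X'/'Y' tiling mode, 'g' bound in the global GTT, 'M' has a kernel
 * mapping.  A space is printed when a flag does not apply.
 */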
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;
	enum intel_engine_id id;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain);
	for_each_engine_id(engine, dev_priv, id)
		seq_printf(m, "%x ",
			   i915_gem_request_get_seqno(obj->last_read_req[id]));
	seq_printf(m, "] %x %x%s%s%s",
		   i915_gem_request_get_seqno(obj->last_write_req),
		   i915_gem_request_get_seqno(obj->last_fenced_req),
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->pin_count > 0)
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   vma->is_ggtt ? "g" : "pp",
			   vma->node.start, vma->node.size);
		if (vma->is_ggtt)
			seq_printf(m, ", type: %u", vma->ggtt_view.type);
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (obj->pin_display || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_display)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->last_write_req != NULL)
		seq_printf(m, " (%s)",
			   i915_gem_request_get_engine(obj->last_write_req)->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}

static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_vma *vma;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &ggtt->base.active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &ggtt->base.inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, vm_link) {
		seq_printf(m, "   ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
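/*
 * The active/inactive readouts above are exposed through debugfs entries
 * (expected to be i915_gem_active and i915_gem_inactive under
 * <debugfs>/dri/<minor>/), registered via the i915_debugfs_list table
 * further down this file (not shown here); the list selector arrives
 * through node->info_ent->data.
 */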
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}
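/*
 * obj_rank_by_stolen() follows the list_sort() comparator contract:
 * return a negative value, zero or a positive value to order @A before,
 * equal to or after @B.  list_sort() is a stable merge sort, so objects
 * at the same stolen offset keep their relative order.
 */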
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_total_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (vma->is_ggtt) {
				stats->global += obj->base.size;
				continue;
			}

			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->file_priv != stats->file_priv)
				continue;

			if (obj->active) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->active)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_total_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)
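/*
 * Note: count_objects() and count_vmas() accumulate into the caller's
 * local size/count/mappable_size/mappable_count variables and expect an
 * obj or vma iterator in scope; i915_gem_object_info() below resets all
 * four between each list it walks.  per_file_stats() has the callback
 * signature that idr_for_each() expects.
 */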
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mappable_count, purgeable_count;
	u64 size, mappable_size, purgeable_size;
	unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
	u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&ggtt->base.active_list, vm_link);
	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&ggtt->base.inactive_list, vm_link);
	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
		if (obj->mapping) {
			pin_mapped_count++;
			pin_mapped_size += obj->base.size;
			if (obj->pages_pin_count == 0) {
				pin_mapped_purgeable_count++;
				pin_mapped_purgeable_size += obj->base.size;
			}
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_display) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
		if (obj->mapping) {
			pin_mapped_count++;
			pin_mapped_size += obj->base.size;
			if (obj->pages_pin_count == 0) {
				pin_mapped_purgeable_count++;
				pin_mapped_purgeable_size += obj->base.size;
			}
		}
	}
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
		   count, size);
	seq_printf(m,
		   "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
		   pin_mapped_count, pin_mapped_purgeable_count,
		   pin_mapped_size, pin_mapped_purgeable_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);

	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
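/*
 * Typical interactive use (assuming the usual i915_gem_objects entry and
 * DRM minor 0):
 *
 *   # cat /sys/kernel/debug/dri/0/i915_gem_objects
 *
 * which prints the global totals followed by one summary line per open
 * DRM file, as produced above.
 */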
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irq(&dev->event_lock);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 addr;

			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);

				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
					   engine->name,
					   i915_gem_request_get_seqno(work->flip_queued_req),
					   dev_priv->next_seqno,
					   engine->get_seqno(engine),
					   i915_gem_request_completed(work->flip_queued_req, true));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   drm_crtc_vblank_count(&crtc->base));
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_INFO(dev)->gen >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_request *req;
	int ret, any;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_engine(engine, dev_priv) {
		int count;

		count = 0;
		list_for_each_entry(req, &engine->request_list, list)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", engine->name, count);
		list_for_each_entry(req, &engine->request_list, list) {
			struct task_struct *task;

			rcu_read_lock();
			task = NULL;
			if (req->pid)
				task = pid_task(req->pid, PIDTYPE_PID);
			seq_printf(m, "    %x @ %d: %s [%d]\n",
				   req->seqno,
				   (int) (jiffies - req->emitted_jiffies),
				   task ? task->comm : "<unknown>",
				   task ? task->pid : -1);
			rcu_read_unlock();
		}

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, engine->get_seqno(engine));
	seq_printf(m, "Current user interrupts (%s): %x\n",
		   engine->name, READ_ONCE(engine->user_interrupts));
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv)
		i915_ring_seqno_info(m, engine);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
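/*
 * Register-naming convention used throughout i915_interrupt_info()
 * below: IER is the interrupt enable register, IIR the interrupt
 * identity (pending) register, IMR the interrupt mask register and ISR
 * the interrupt status register, matching the "enable"/"identity"/
 * "mask" strings in the output.
 */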
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	for_each_engine(engine, dev_priv) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	const u32 *hws;
	int i;

	engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
	hws = engine->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}
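/*
 * i915_error_state is read/write: reading dumps the most recent GPU
 * error capture through i915_error_state_to_str(), while writing any
 * data to the file discards the capture via i915_error_state_write()
 * above.
 */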
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_BROXTON(dev)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev))
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		if (IS_GEN9(dev))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
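/*
 * The raw RPNSWREQ/RPSTAT values above are in hardware ratio units;
 * intel_gpu_freq() converts them to MHz.  On SKL/KBL the ratio is
 * additionally scaled by GEN9_FREQ_SCALER, hence the extra
 * multiplications for the RP0/RP1/RPn capability fields.
 */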
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	u32 instdone[I915_NUM_INSTDONE_REG];
	enum intel_engine_id id;
	int j;

	if (!i915.enable_hangcheck) {
		seq_printf(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine_id(engine, dev_priv, id) {
		acthd[id] = intel_ring_get_active_head(engine);
		seqno[id] = engine->get_seqno(engine);
	}

	i915_get_extra_instdone(dev, instdone);

	intel_runtime_pm_put(dev_priv);

	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
		seq_printf(m, "Hangcheck active, fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	} else
		seq_printf(m, "Hangcheck inactive\n");

	for_each_engine_id(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno,
			   seqno[id],
			   engine->last_submitted_seqno);
		seq_printf(m, "\tuser interrupts = %x [current %x]\n",
			   engine->hangcheck.user_interrupts,
			   READ_ONCE(engine->user_interrupts));
		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
		seq_printf(m, "\taction = %d\n", engine->hangcheck.action);

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =");

			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
				seq_printf(m, " 0x%08x", instdone[j]);

			seq_puts(m, "\n\tinstdone accu =");

			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
				seq_printf(m, " 0x%08x",
					   engine->hangcheck.instdone[j]);

			seq_puts(m, "\n");
		}
	}

	return 0;
}
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_uncore_forcewake_domain *fw_domain;

	spin_lock_irq(&dev_priv->uncore.lock);
	for_each_fw_domain(fw_domain, dev_priv) {
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   fw_domain->wake_count);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);

	return 0;
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1, pw_status;

	intel_runtime_pm_get(dev_priv);

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	return i915_forcewake_domains(m, NULL);
}
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6 voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}
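/*
 * The DRPC readout is platform-specific: VLV/CHV, gen6+ and Ironlake
 * each have their own helper above, and i915_drpc_info() below simply
 * dispatches on the platform.
 */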
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return vlv_drpc_info(m);
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->fbc.lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n",
			   dev_priv->fbc.no_fbc_reason);

	if (INTEL_INFO(dev_priv)->gen >= 7)
		seq_printf(m, "Compressing: %s\n",
			   yesno(I915_READ(FBC_STATUS2) &
				 FBC_COMPRESSION_MASK));

	mutex_unlock(&dev_priv->fbc.lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_fbc_fc_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_fc_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
			i915_fbc_fc_get, i915_fbc_fc_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
		 IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
1884 GEN9_FREQ_SCALER : 1))), 1885 ((ia_freq >> 0) & 0xff) * 100, 1886 ((ia_freq >> 8) & 0xff) * 100); 1887 } 1888 1889 mutex_unlock(&dev_priv->rps.hw_lock); 1890 1891 out: 1892 intel_runtime_pm_put(dev_priv); 1893 return ret; 1894 } 1895 1896 static int i915_opregion(struct seq_file *m, void *unused) 1897 { 1898 struct drm_info_node *node = m->private; 1899 struct drm_device *dev = node->minor->dev; 1900 struct drm_i915_private *dev_priv = dev->dev_private; 1901 struct intel_opregion *opregion = &dev_priv->opregion; 1902 int ret; 1903 1904 ret = mutex_lock_interruptible(&dev->struct_mutex); 1905 if (ret) 1906 goto out; 1907 1908 if (opregion->header) 1909 seq_write(m, opregion->header, OPREGION_SIZE); 1910 1911 mutex_unlock(&dev->struct_mutex); 1912 1913 out: 1914 return 0; 1915 } 1916 1917 static int i915_vbt(struct seq_file *m, void *unused) 1918 { 1919 struct drm_info_node *node = m->private; 1920 struct drm_device *dev = node->minor->dev; 1921 struct drm_i915_private *dev_priv = dev->dev_private; 1922 struct intel_opregion *opregion = &dev_priv->opregion; 1923 1924 if (opregion->vbt) 1925 seq_write(m, opregion->vbt, opregion->vbt_size); 1926 1927 return 0; 1928 } 1929 1930 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1931 { 1932 struct drm_info_node *node = m->private; 1933 struct drm_device *dev = node->minor->dev; 1934 struct intel_framebuffer *fbdev_fb = NULL; 1935 struct drm_framebuffer *drm_fb; 1936 int ret; 1937 1938 ret = mutex_lock_interruptible(&dev->struct_mutex); 1939 if (ret) 1940 return ret; 1941 1942 #ifdef CONFIG_DRM_FBDEV_EMULATION 1943 if (to_i915(dev)->fbdev) { 1944 fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb); 1945 1946 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1947 fbdev_fb->base.width, 1948 fbdev_fb->base.height, 1949 fbdev_fb->base.depth, 1950 fbdev_fb->base.bits_per_pixel, 1951 fbdev_fb->base.modifier[0], 1952 drm_framebuffer_read_refcount(&fbdev_fb->base)); 1953 describe_obj(m, fbdev_fb->obj); 1954 seq_putc(m, '\n'); 1955 } 1956 #endif 1957 1958 mutex_lock(&dev->mode_config.fb_lock); 1959 drm_for_each_fb(drm_fb, dev) { 1960 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 1961 if (fb == fbdev_fb) 1962 continue; 1963 1964 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1965 fb->base.width, 1966 fb->base.height, 1967 fb->base.depth, 1968 fb->base.bits_per_pixel, 1969 fb->base.modifier[0], 1970 drm_framebuffer_read_refcount(&fb->base)); 1971 describe_obj(m, fb->obj); 1972 seq_putc(m, '\n'); 1973 } 1974 mutex_unlock(&dev->mode_config.fb_lock); 1975 mutex_unlock(&dev->struct_mutex); 1976 1977 return 0; 1978 } 1979 1980 static void describe_ctx_ringbuf(struct seq_file *m, 1981 struct intel_ringbuffer *ringbuf) 1982 { 1983 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)", 1984 ringbuf->space, ringbuf->head, ringbuf->tail, 1985 ringbuf->last_retired_head); 1986 } 1987 1988 static int i915_context_status(struct seq_file *m, void *unused) 1989 { 1990 struct drm_info_node *node = m->private; 1991 struct drm_device *dev = node->minor->dev; 1992 struct drm_i915_private *dev_priv = dev->dev_private; 1993 struct intel_engine_cs *engine; 1994 struct intel_context *ctx; 1995 enum intel_engine_id id; 1996 int ret; 1997 1998 ret = mutex_lock_interruptible(&dev->struct_mutex); 1999 if (ret) 2000 return ret; 2001 2002 list_for_each_entry(ctx, &dev_priv->context_list, link) { 2003 if (!i915.enable_execlists && 
2004 ctx->legacy_hw_ctx.rcs_state == NULL) 2005 continue; 2006 2007 seq_puts(m, "HW context "); 2008 describe_ctx(m, ctx); 2009 if (ctx == dev_priv->kernel_context) 2010 seq_printf(m, "(kernel context) "); 2011 2012 if (i915.enable_execlists) { 2013 seq_putc(m, '\n'); 2014 for_each_engine_id(engine, dev_priv, id) { 2015 struct drm_i915_gem_object *ctx_obj = 2016 ctx->engine[id].state; 2017 struct intel_ringbuffer *ringbuf = 2018 ctx->engine[id].ringbuf; 2019 2020 seq_printf(m, "%s: ", engine->name); 2021 if (ctx_obj) 2022 describe_obj(m, ctx_obj); 2023 if (ringbuf) 2024 describe_ctx_ringbuf(m, ringbuf); 2025 seq_putc(m, '\n'); 2026 } 2027 } else { 2028 describe_obj(m, ctx->legacy_hw_ctx.rcs_state); 2029 } 2030 2031 seq_putc(m, '\n'); 2032 } 2033 2034 mutex_unlock(&dev->struct_mutex); 2035 2036 return 0; 2037 } 2038 2039 static void i915_dump_lrc_obj(struct seq_file *m, 2040 struct intel_context *ctx, 2041 struct intel_engine_cs *engine) 2042 { 2043 struct page *page; 2044 uint32_t *reg_state; 2045 int j; 2046 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; 2047 unsigned long ggtt_offset = 0; 2048 2049 if (ctx_obj == NULL) { 2050 seq_printf(m, "Context on %s with no gem object\n", 2051 engine->name); 2052 return; 2053 } 2054 2055 seq_printf(m, "CONTEXT: %s %u\n", engine->name, 2056 intel_execlists_ctx_id(ctx, engine)); 2057 2058 if (!i915_gem_obj_ggtt_bound(ctx_obj)) 2059 seq_puts(m, "\tNot bound in GGTT\n"); 2060 else 2061 ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj); 2062 2063 if (i915_gem_object_get_pages(ctx_obj)) { 2064 seq_puts(m, "\tFailed to get pages for context object\n"); 2065 return; 2066 } 2067 2068 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); 2069 if (!WARN_ON(page == NULL)) { 2070 reg_state = kmap_atomic(page); 2071 2072 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { 2073 seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", 2074 ggtt_offset + 4096 + (j * 4), 2075 reg_state[j], reg_state[j + 1], 2076 reg_state[j + 2], reg_state[j + 3]); 2077 } 2078 kunmap_atomic(reg_state); 2079 } 2080 2081 seq_putc(m, '\n'); 2082 } 2083 2084 static int i915_dump_lrc(struct seq_file *m, void *unused) 2085 { 2086 struct drm_info_node *node = (struct drm_info_node *) m->private; 2087 struct drm_device *dev = node->minor->dev; 2088 struct drm_i915_private *dev_priv = dev->dev_private; 2089 struct intel_engine_cs *engine; 2090 struct intel_context *ctx; 2091 int ret; 2092 2093 if (!i915.enable_execlists) { 2094 seq_printf(m, "Logical Ring Contexts are disabled\n"); 2095 return 0; 2096 } 2097 2098 ret = mutex_lock_interruptible(&dev->struct_mutex); 2099 if (ret) 2100 return ret; 2101 2102 list_for_each_entry(ctx, &dev_priv->context_list, link) 2103 if (ctx != dev_priv->kernel_context) 2104 for_each_engine(engine, dev_priv) 2105 i915_dump_lrc_obj(m, ctx, engine); 2106 2107 mutex_unlock(&dev->struct_mutex); 2108 2109 return 0; 2110 } 2111 2112 static int i915_execlists(struct seq_file *m, void *data) 2113 { 2114 struct drm_info_node *node = (struct drm_info_node *)m->private; 2115 struct drm_device *dev = node->minor->dev; 2116 struct drm_i915_private *dev_priv = dev->dev_private; 2117 struct intel_engine_cs *engine; 2118 u32 status_pointer; 2119 u8 read_pointer; 2120 u8 write_pointer; 2121 u32 status; 2122 u32 ctx_id; 2123 struct list_head *cursor; 2124 int i, ret; 2125 2126 if (!i915.enable_execlists) { 2127 seq_puts(m, "Logical Ring Contexts are disabled\n"); 2128 return 0; 2129 } 2130 2131 ret = mutex_lock_interruptible(&dev->struct_mutex); 2132 if 
(ret) 2133 return ret; 2134 2135 intel_runtime_pm_get(dev_priv); 2136 2137 for_each_engine(engine, dev_priv) { 2138 struct drm_i915_gem_request *head_req = NULL; 2139 int count = 0; 2140 2141 seq_printf(m, "%s\n", engine->name); 2142 2143 status = I915_READ(RING_EXECLIST_STATUS_LO(engine)); 2144 ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine)); 2145 seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n", 2146 status, ctx_id); 2147 2148 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); 2149 seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer); 2150 2151 read_pointer = engine->next_context_status_buffer; 2152 write_pointer = GEN8_CSB_WRITE_PTR(status_pointer); 2153 if (read_pointer > write_pointer) 2154 write_pointer += GEN8_CSB_ENTRIES; 2155 seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n", 2156 read_pointer, write_pointer); 2157 2158 for (i = 0; i < GEN8_CSB_ENTRIES; i++) { 2159 status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i)); 2160 ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i)); 2161 2162 seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n", 2163 i, status, ctx_id); 2164 } 2165 2166 spin_lock_bh(&engine->execlist_lock); 2167 list_for_each(cursor, &engine->execlist_queue) 2168 count++; 2169 head_req = list_first_entry_or_null(&engine->execlist_queue, 2170 struct drm_i915_gem_request, 2171 execlist_link); 2172 spin_unlock_bh(&engine->execlist_lock); 2173 2174 seq_printf(m, "\t%d requests in queue\n", count); 2175 if (head_req) { 2176 seq_printf(m, "\tHead request id: %u\n", 2177 intel_execlists_ctx_id(head_req->ctx, engine)); 2178 seq_printf(m, "\tHead request tail: %u\n", 2179 head_req->tail); 2180 } 2181 2182 seq_putc(m, '\n'); 2183 } 2184 2185 intel_runtime_pm_put(dev_priv); 2186 mutex_unlock(&dev->struct_mutex); 2187 2188 return 0; 2189 } 2190 2191 static const char *swizzle_string(unsigned swizzle) 2192 { 2193 switch (swizzle) { 2194 case I915_BIT_6_SWIZZLE_NONE: 2195 return "none"; 2196 case I915_BIT_6_SWIZZLE_9: 2197 return "bit9"; 2198 case I915_BIT_6_SWIZZLE_9_10: 2199 return "bit9/bit10"; 2200 case I915_BIT_6_SWIZZLE_9_11: 2201 return "bit9/bit11"; 2202 case I915_BIT_6_SWIZZLE_9_10_11: 2203 return "bit9/bit10/bit11"; 2204 case I915_BIT_6_SWIZZLE_9_17: 2205 return "bit9/bit17"; 2206 case I915_BIT_6_SWIZZLE_9_10_17: 2207 return "bit9/bit10/bit17"; 2208 case I915_BIT_6_SWIZZLE_UNKNOWN: 2209 return "unknown"; 2210 } 2211 2212 return "bug"; 2213 } 2214 2215 static int i915_swizzle_info(struct seq_file *m, void *data) 2216 { 2217 struct drm_info_node *node = m->private; 2218 struct drm_device *dev = node->minor->dev; 2219 struct drm_i915_private *dev_priv = dev->dev_private; 2220 int ret; 2221 2222 ret = mutex_lock_interruptible(&dev->struct_mutex); 2223 if (ret) 2224 return ret; 2225 intel_runtime_pm_get(dev_priv); 2226 2227 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 2228 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 2229 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 2230 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 2231 2232 if (IS_GEN3(dev) || IS_GEN4(dev)) { 2233 seq_printf(m, "DCC = 0x%08x\n", 2234 I915_READ(DCC)); 2235 seq_printf(m, "DCC2 = 0x%08x\n", 2236 I915_READ(DCC2)); 2237 seq_printf(m, "C0DRB3 = 0x%04x\n", 2238 I915_READ16(C0DRB3)); 2239 seq_printf(m, "C1DRB3 = 0x%04x\n", 2240 I915_READ16(C1DRB3)); 2241 } else if (INTEL_INFO(dev)->gen >= 6) { 2242 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 2243 I915_READ(MAD_DIMM_C0)); 2244 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 2245 I915_READ(MAD_DIMM_C1)); 2246 
seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 2247 I915_READ(MAD_DIMM_C2)); 2248 seq_printf(m, "TILECTL = 0x%08x\n", 2249 I915_READ(TILECTL)); 2250 if (INTEL_INFO(dev)->gen >= 8) 2251 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 2252 I915_READ(GAMTARBMODE)); 2253 else 2254 seq_printf(m, "ARB_MODE = 0x%08x\n", 2255 I915_READ(ARB_MODE)); 2256 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 2257 I915_READ(DISP_ARB_CTL)); 2258 } 2259 2260 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2261 seq_puts(m, "L-shaped memory detected\n"); 2262 2263 intel_runtime_pm_put(dev_priv); 2264 mutex_unlock(&dev->struct_mutex); 2265 2266 return 0; 2267 } 2268 2269 static int per_file_ctx(int id, void *ptr, void *data) 2270 { 2271 struct intel_context *ctx = ptr; 2272 struct seq_file *m = data; 2273 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2274 2275 if (!ppgtt) { 2276 seq_printf(m, " no ppgtt for context %d\n", 2277 ctx->user_handle); 2278 return 0; 2279 } 2280 2281 if (i915_gem_context_is_default(ctx)) 2282 seq_puts(m, " default context:\n"); 2283 else 2284 seq_printf(m, " context %d:\n", ctx->user_handle); 2285 ppgtt->debug_dump(ppgtt, m); 2286 2287 return 0; 2288 } 2289 2290 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2291 { 2292 struct drm_i915_private *dev_priv = dev->dev_private; 2293 struct intel_engine_cs *engine; 2294 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2295 int i; 2296 2297 if (!ppgtt) 2298 return; 2299 2300 for_each_engine(engine, dev_priv) { 2301 seq_printf(m, "%s\n", engine->name); 2302 for (i = 0; i < 4; i++) { 2303 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i)); 2304 pdp <<= 32; 2305 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i)); 2306 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2307 } 2308 } 2309 } 2310 2311 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2312 { 2313 struct drm_i915_private *dev_priv = dev->dev_private; 2314 struct intel_engine_cs *engine; 2315 2316 if (INTEL_INFO(dev)->gen == 6) 2317 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2318 2319 for_each_engine(engine, dev_priv) { 2320 seq_printf(m, "%s\n", engine->name); 2321 if (INTEL_INFO(dev)->gen == 7) 2322 seq_printf(m, "GFX_MODE: 0x%08x\n", 2323 I915_READ(RING_MODE_GEN7(engine))); 2324 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", 2325 I915_READ(RING_PP_DIR_BASE(engine))); 2326 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", 2327 I915_READ(RING_PP_DIR_BASE_READ(engine))); 2328 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", 2329 I915_READ(RING_PP_DIR_DCLV(engine))); 2330 } 2331 if (dev_priv->mm.aliasing_ppgtt) { 2332 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2333 2334 seq_puts(m, "aliasing PPGTT:\n"); 2335 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); 2336 2337 ppgtt->debug_dump(ppgtt, m); 2338 } 2339 2340 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2341 } 2342 2343 static int i915_ppgtt_info(struct seq_file *m, void *data) 2344 { 2345 struct drm_info_node *node = m->private; 2346 struct drm_device *dev = node->minor->dev; 2347 struct drm_i915_private *dev_priv = dev->dev_private; 2348 struct drm_file *file; 2349 2350 int ret = mutex_lock_interruptible(&dev->struct_mutex); 2351 if (ret) 2352 return ret; 2353 intel_runtime_pm_get(dev_priv); 2354 2355 if (INTEL_INFO(dev)->gen >= 8) 2356 gen8_ppgtt_info(m, dev); 2357 else if (INTEL_INFO(dev)->gen >= 6) 2358 gen6_ppgtt_info(m, dev); 2359 2360 mutex_lock(&dev->filelist_mutex); 2361 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2362 struct drm_i915_file_private 
*file_priv = file->driver_priv; 2363 struct task_struct *task; 2364 2365 task = get_pid_task(file->pid, PIDTYPE_PID); 2366 if (!task) { 2367 ret = -ESRCH; 2368 mutex_unlock(&dev->filelist_mutex); goto out_put; 2369 } 2370 seq_printf(m, "\nproc: %s\n", task->comm); 2371 put_task_struct(task); 2372 idr_for_each(&file_priv->context_idr, per_file_ctx, 2373 m); 2374 } 2375 mutex_unlock(&dev->filelist_mutex); 2376 2377 out_put: 2378 intel_runtime_pm_put(dev_priv); 2379 mutex_unlock(&dev->struct_mutex); 2380 2381 return ret; 2382 } 2383 2384 static int count_irq_waiters(struct drm_i915_private *i915) 2385 { 2386 struct intel_engine_cs *engine; 2387 int count = 0; 2388 2389 for_each_engine(engine, i915) 2390 count += engine->irq_refcount; 2391 2392 return count; 2393 } 2394 2395 static int i915_rps_boost_info(struct seq_file *m, void *data) 2396 { 2397 struct drm_info_node *node = m->private; 2398 struct drm_device *dev = node->minor->dev; 2399 struct drm_i915_private *dev_priv = dev->dev_private; 2400 struct drm_file *file; 2401 2402 seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled); 2403 seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy); 2404 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); 2405 seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2406 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 2407 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 2408 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit), 2409 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit), 2410 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 2411 2412 mutex_lock(&dev->filelist_mutex); 2413 spin_lock(&dev_priv->rps.client_lock); 2414 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2415 struct drm_i915_file_private *file_priv = file->driver_priv; 2416 struct task_struct *task; 2417 2418 rcu_read_lock(); 2419 task = pid_task(file->pid, PIDTYPE_PID); 2420 seq_printf(m, "%s [%d]: %d boosts%s\n", 2421 task ? task->comm : "<unknown>", 2422 task ? task->pid : -1, 2423 file_priv->rps.boosts, 2424 list_empty(&file_priv->rps.link) ? "" : ", active"); 2425 rcu_read_unlock(); 2426 } 2427 seq_printf(m, "Semaphore boosts: %d%s\n", 2428 dev_priv->rps.semaphores.boosts, 2429 list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active"); 2430 seq_printf(m, "MMIO flip boosts: %d%s\n", 2431 dev_priv->rps.mmioflips.boosts, 2432 list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active"); 2433 seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts); 2434 spin_unlock(&dev_priv->rps.client_lock); 2435 mutex_unlock(&dev->filelist_mutex); 2436 2437 return 0; 2438 } 2439 2440 static int i915_llc(struct seq_file *m, void *data) 2441 { 2442 struct drm_info_node *node = m->private; 2443 struct drm_device *dev = node->minor->dev; 2444 struct drm_i915_private *dev_priv = dev->dev_private; 2445 const bool edram = INTEL_GEN(dev_priv) > 8; 2446 2447 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); 2448 seq_printf(m, "%s: %lluMB\n", edram ? 
"eDRAM" : "eLLC", 2449 intel_uncore_edram_size(dev_priv)/1024/1024); 2450 2451 return 0; 2452 } 2453 2454 static int i915_guc_load_status_info(struct seq_file *m, void *data) 2455 { 2456 struct drm_info_node *node = m->private; 2457 struct drm_i915_private *dev_priv = node->minor->dev->dev_private; 2458 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 2459 u32 tmp, i; 2460 2461 if (!HAS_GUC_UCODE(dev_priv)) 2462 return 0; 2463 2464 seq_printf(m, "GuC firmware status:\n"); 2465 seq_printf(m, "\tpath: %s\n", 2466 guc_fw->guc_fw_path); 2467 seq_printf(m, "\tfetch: %s\n", 2468 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status)); 2469 seq_printf(m, "\tload: %s\n", 2470 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 2471 seq_printf(m, "\tversion wanted: %d.%d\n", 2472 guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); 2473 seq_printf(m, "\tversion found: %d.%d\n", 2474 guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found); 2475 seq_printf(m, "\theader: offset is %d; size = %d\n", 2476 guc_fw->header_offset, guc_fw->header_size); 2477 seq_printf(m, "\tuCode: offset is %d; size = %d\n", 2478 guc_fw->ucode_offset, guc_fw->ucode_size); 2479 seq_printf(m, "\tRSA: offset is %d; size = %d\n", 2480 guc_fw->rsa_offset, guc_fw->rsa_size); 2481 2482 tmp = I915_READ(GUC_STATUS); 2483 2484 seq_printf(m, "\nGuC status 0x%08x:\n", tmp); 2485 seq_printf(m, "\tBootrom status = 0x%x\n", 2486 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); 2487 seq_printf(m, "\tuKernel status = 0x%x\n", 2488 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); 2489 seq_printf(m, "\tMIA Core status = 0x%x\n", 2490 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); 2491 seq_puts(m, "\nScratch registers:\n"); 2492 for (i = 0; i < 16; i++) 2493 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); 2494 2495 return 0; 2496 } 2497 2498 static void i915_guc_client_info(struct seq_file *m, 2499 struct drm_i915_private *dev_priv, 2500 struct i915_guc_client *client) 2501 { 2502 struct intel_engine_cs *engine; 2503 uint64_t tot = 0; 2504 2505 seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n", 2506 client->priority, client->ctx_index, client->proc_desc_offset); 2507 seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n", 2508 client->doorbell_id, client->doorbell_offset, client->cookie); 2509 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", 2510 client->wq_size, client->wq_offset, client->wq_tail); 2511 2512 seq_printf(m, "\tFailed to queue: %u\n", client->q_fail); 2513 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); 2514 seq_printf(m, "\tLast submission result: %d\n", client->retcode); 2515 2516 for_each_engine(engine, dev_priv) { 2517 seq_printf(m, "\tSubmissions: %llu %s\n", 2518 client->submissions[engine->guc_id], 2519 engine->name); 2520 tot += client->submissions[engine->guc_id]; 2521 } 2522 seq_printf(m, "\tTotal: %llu\n", tot); 2523 } 2524 2525 static int i915_guc_info(struct seq_file *m, void *data) 2526 { 2527 struct drm_info_node *node = m->private; 2528 struct drm_device *dev = node->minor->dev; 2529 struct drm_i915_private *dev_priv = dev->dev_private; 2530 struct intel_guc guc; 2531 struct i915_guc_client client = {}; 2532 struct intel_engine_cs *engine; 2533 u64 total = 0; 2534 2535 if (!HAS_GUC_SCHED(dev_priv)) 2536 return 0; 2537 2538 if (mutex_lock_interruptible(&dev->struct_mutex)) 2539 return 0; 2540 2541 /* Take a local copy of the GuC data, so we can dump it at leisure */ 2542 guc = dev_priv->guc; 2543 if (guc.execbuf_client) 2544 client = *guc.execbuf_client; 2545 
2546 mutex_unlock(&dev->struct_mutex); 2547 2548 seq_printf(m, "GuC total action count: %llu\n", guc.action_count); 2549 seq_printf(m, "GuC action failure count: %u\n", guc.action_fail); 2550 seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd); 2551 seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status); 2552 seq_printf(m, "GuC last action error code: %d\n", guc.action_err); 2553 2554 seq_printf(m, "\nGuC submissions:\n"); 2555 for_each_engine(engine, dev_priv) { 2556 seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", 2557 engine->name, guc.submissions[engine->guc_id], 2558 guc.last_seqno[engine->guc_id]); 2559 total += guc.submissions[engine->guc_id]; 2560 } 2561 seq_printf(m, "\t%s: %llu\n", "Total", total); 2562 2563 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client); 2564 i915_guc_client_info(m, dev_priv, &client); 2565 2566 /* Add more as required ... */ 2567 2568 return 0; 2569 } 2570 2571 static int i915_guc_log_dump(struct seq_file *m, void *data) 2572 { 2573 struct drm_info_node *node = m->private; 2574 struct drm_device *dev = node->minor->dev; 2575 struct drm_i915_private *dev_priv = dev->dev_private; 2576 struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj; 2577 u32 *log; 2578 int i = 0, pg; 2579 2580 if (!log_obj) 2581 return 0; 2582 2583 for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) { 2584 log = kmap_atomic(i915_gem_object_get_page(log_obj, pg)); 2585 2586 for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4) 2587 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n", 2588 *(log + i), *(log + i + 1), 2589 *(log + i + 2), *(log + i + 3)); 2590 2591 kunmap_atomic(log); 2592 } 2593 2594 seq_putc(m, '\n'); 2595 2596 return 0; 2597 } 2598 2599 static int i915_edp_psr_status(struct seq_file *m, void *data) 2600 { 2601 struct drm_info_node *node = m->private; 2602 struct drm_device *dev = node->minor->dev; 2603 struct drm_i915_private *dev_priv = dev->dev_private; 2604 u32 psrperf = 0; 2605 u32 stat[3]; 2606 enum pipe pipe; 2607 bool enabled = false; 2608 2609 if (!HAS_PSR(dev)) { 2610 seq_puts(m, "PSR not supported\n"); 2611 return 0; 2612 } 2613 2614 intel_runtime_pm_get(dev_priv); 2615 2616 mutex_lock(&dev_priv->psr.lock); 2617 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 2618 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 2619 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2620 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2621 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2622 dev_priv->psr.busy_frontbuffer_bits); 2623 seq_printf(m, "Re-enable work scheduled: %s\n", 2624 yesno(work_busy(&dev_priv->psr.work.work))); 2625 2626 if (HAS_DDI(dev)) 2627 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 2628 else { 2629 for_each_pipe(dev_priv, pipe) { 2630 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2631 VLV_EDP_PSR_CURR_STATE_MASK; 2632 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2633 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2634 enabled = true; 2635 } 2636 } 2637 2638 seq_printf(m, "Main link in standby mode: %s\n", 2639 yesno(dev_priv->psr.link_standby)); 2640 2641 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); 2642 2643 if (!HAS_DDI(dev)) 2644 for_each_pipe(dev_priv, pipe) { 2645 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2646 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2647 seq_printf(m, " pipe %c", pipe_name(pipe)); 2648 } 2649 seq_puts(m, "\n"); 2650 2651 /* 2652 * VLV/CHV PSR has no kind of performance counter 
2653 * SKL+ Perf counter is reset to 0 every time DC state is entered 2654 */ 2655 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2656 psrperf = I915_READ(EDP_PSR_PERF_CNT) & 2657 EDP_PSR_PERF_CNT_MASK; 2658 2659 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2660 } 2661 mutex_unlock(&dev_priv->psr.lock); 2662 2663 intel_runtime_pm_put(dev_priv); 2664 return 0; 2665 } 2666 2667 static int i915_sink_crc(struct seq_file *m, void *data) 2668 { 2669 struct drm_info_node *node = m->private; 2670 struct drm_device *dev = node->minor->dev; 2671 struct intel_encoder *encoder; 2672 struct intel_connector *connector; 2673 struct intel_dp *intel_dp = NULL; 2674 int ret; 2675 u8 crc[6]; 2676 2677 drm_modeset_lock_all(dev); 2678 for_each_intel_connector(dev, connector) { 2679 2680 if (connector->base.dpms != DRM_MODE_DPMS_ON) 2681 continue; 2682 2683 if (!connector->base.encoder) 2684 continue; 2685 2686 encoder = to_intel_encoder(connector->base.encoder); 2687 if (encoder->type != INTEL_OUTPUT_EDP) 2688 continue; 2689 2690 intel_dp = enc_to_intel_dp(&encoder->base); 2691 2692 ret = intel_dp_sink_crc(intel_dp, crc); 2693 if (ret) 2694 goto out; 2695 2696 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2697 crc[0], crc[1], crc[2], 2698 crc[3], crc[4], crc[5]); 2699 goto out; 2700 } 2701 ret = -ENODEV; 2702 out: 2703 drm_modeset_unlock_all(dev); 2704 return ret; 2705 } 2706 2707 static int i915_energy_uJ(struct seq_file *m, void *data) 2708 { 2709 struct drm_info_node *node = m->private; 2710 struct drm_device *dev = node->minor->dev; 2711 struct drm_i915_private *dev_priv = dev->dev_private; 2712 u64 power; 2713 u32 units; 2714 2715 if (INTEL_INFO(dev)->gen < 6) 2716 return -ENODEV; 2717 2718 intel_runtime_pm_get(dev_priv); 2719 2720 rdmsrl(MSR_RAPL_POWER_UNIT, power); 2721 power = (power & 0x1f00) >> 8; 2722 units = 1000000 / (1 << power); /* convert to uJ */ 2723 power = I915_READ(MCH_SECP_NRG_STTS); 2724 power *= units; 2725 2726 intel_runtime_pm_put(dev_priv); 2727 2728 seq_printf(m, "%llu", (unsigned long long)power); 2729 2730 return 0; 2731 } 2732 2733 static int i915_runtime_pm_status(struct seq_file *m, void *unused) 2734 { 2735 struct drm_info_node *node = m->private; 2736 struct drm_device *dev = node->minor->dev; 2737 struct drm_i915_private *dev_priv = dev->dev_private; 2738 2739 if (!HAS_RUNTIME_PM(dev_priv)) 2740 seq_puts(m, "Runtime power management not supported\n"); 2741 2742 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); 2743 seq_printf(m, "IRQs disabled: %s\n", 2744 yesno(!intel_irqs_enabled(dev_priv))); 2745 #ifdef CONFIG_PM 2746 seq_printf(m, "Usage count: %d\n", 2747 atomic_read(&dev->dev->power.usage_count)); 2748 #else 2749 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); 2750 #endif 2751 seq_printf(m, "PCI device power state: %s [%d]\n", 2752 pci_power_name(dev_priv->dev->pdev->current_state), 2753 dev_priv->dev->pdev->current_state); 2754 2755 return 0; 2756 } 2757 2758 static int i915_power_domain_info(struct seq_file *m, void *unused) 2759 { 2760 struct drm_info_node *node = m->private; 2761 struct drm_device *dev = node->minor->dev; 2762 struct drm_i915_private *dev_priv = dev->dev_private; 2763 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2764 int i; 2765 2766 mutex_lock(&power_domains->lock); 2767 2768 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2769 for (i = 0; i < power_domains->power_well_count; i++) { 2770 struct i915_power_well *power_well; 2771 enum intel_display_power_domain power_domain; 2772 
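/* One line per power well with its use count, then an indented line for every domain that well powers. */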
2773 power_well = &power_domains->power_wells[i]; 2774 seq_printf(m, "%-25s %d\n", power_well->name, 2775 power_well->count); 2776 2777 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM; 2778 power_domain++) { 2779 if (!(BIT(power_domain) & power_well->domains)) 2780 continue; 2781 2782 seq_printf(m, " %-23s %d\n", 2783 intel_display_power_domain_str(power_domain), 2784 power_domains->domain_use_count[power_domain]); 2785 } 2786 } 2787 2788 mutex_unlock(&power_domains->lock); 2789 2790 return 0; 2791 } 2792 2793 static int i915_dmc_info(struct seq_file *m, void *unused) 2794 { 2795 struct drm_info_node *node = m->private; 2796 struct drm_device *dev = node->minor->dev; 2797 struct drm_i915_private *dev_priv = dev->dev_private; 2798 struct intel_csr *csr; 2799 2800 if (!HAS_CSR(dev)) { 2801 seq_puts(m, "not supported\n"); 2802 return 0; 2803 } 2804 2805 csr = &dev_priv->csr; 2806 2807 intel_runtime_pm_get(dev_priv); 2808 2809 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); 2810 seq_printf(m, "path: %s\n", csr->fw_path); 2811 2812 if (!csr->dmc_payload) 2813 goto out; 2814 2815 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 2816 CSR_VERSION_MINOR(csr->version)); 2817 2818 if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) { 2819 seq_printf(m, "DC3 -> DC5 count: %d\n", 2820 I915_READ(SKL_CSR_DC3_DC5_COUNT)); 2821 seq_printf(m, "DC5 -> DC6 count: %d\n", 2822 I915_READ(SKL_CSR_DC5_DC6_COUNT)); 2823 } else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) { 2824 seq_printf(m, "DC3 -> DC5 count: %d\n", 2825 I915_READ(BXT_CSR_DC3_DC5_COUNT)); 2826 } 2827 2828 out: 2829 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); 2830 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); 2831 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); 2832 2833 intel_runtime_pm_put(dev_priv); 2834 2835 return 0; 2836 } 2837 2838 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2839 struct drm_display_mode *mode) 2840 { 2841 int i; 2842 2843 for (i = 0; i < tabs; i++) 2844 seq_putc(m, '\t'); 2845 2846 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2847 mode->base.id, mode->name, 2848 mode->vrefresh, mode->clock, 2849 mode->hdisplay, mode->hsync_start, 2850 mode->hsync_end, mode->htotal, 2851 mode->vdisplay, mode->vsync_start, 2852 mode->vsync_end, mode->vtotal, 2853 mode->type, mode->flags); 2854 } 2855 2856 static void intel_encoder_info(struct seq_file *m, 2857 struct intel_crtc *intel_crtc, 2858 struct intel_encoder *intel_encoder) 2859 { 2860 struct drm_info_node *node = m->private; 2861 struct drm_device *dev = node->minor->dev; 2862 struct drm_crtc *crtc = &intel_crtc->base; 2863 struct intel_connector *intel_connector; 2864 struct drm_encoder *encoder; 2865 2866 encoder = &intel_encoder->base; 2867 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2868 encoder->base.id, encoder->name); 2869 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2870 struct drm_connector *connector = &intel_connector->base; 2871 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2872 connector->base.id, 2873 connector->name, 2874 drm_get_connector_status_name(connector->status)); 2875 if (connector->status == connector_status_connected) { 2876 struct drm_display_mode *mode = &crtc->mode; 2877 seq_printf(m, ", mode:\n"); 2878 intel_seq_print_mode(m, 2, mode); 2879 } else { 2880 seq_putc(m, '\n'); 2881 } 2882 } 2883 } 2884 2885 
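/*
 * intel_crtc_info - dump one CRTC's primary plane and encoders
 *
 * Prints the primary plane's framebuffer id, source position and size
 * (or notes that the plane is disabled), then describes every encoder
 * attached to the CRTC via intel_encoder_info(). Expects the caller to
 * hold the modeset locks.
 */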
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2886 { 2887 struct drm_info_node *node = m->private; 2888 struct drm_device *dev = node->minor->dev; 2889 struct drm_crtc *crtc = &intel_crtc->base; 2890 struct intel_encoder *intel_encoder; 2891 struct drm_plane_state *plane_state = crtc->primary->state; 2892 struct drm_framebuffer *fb = plane_state->fb; 2893 2894 if (fb) 2895 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2896 fb->base.id, plane_state->src_x >> 16, 2897 plane_state->src_y >> 16, fb->width, fb->height); 2898 else 2899 seq_puts(m, "\tprimary plane disabled\n"); 2900 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2901 intel_encoder_info(m, intel_crtc, intel_encoder); 2902 } 2903 2904 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2905 { 2906 struct drm_display_mode *mode = panel->fixed_mode; 2907 2908 seq_printf(m, "\tfixed mode:\n"); 2909 intel_seq_print_mode(m, 2, mode); 2910 } 2911 2912 static void intel_dp_info(struct seq_file *m, 2913 struct intel_connector *intel_connector) 2914 { 2915 struct intel_encoder *intel_encoder = intel_connector->encoder; 2916 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2917 2918 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2919 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 2920 if (intel_encoder->type == INTEL_OUTPUT_EDP) 2921 intel_panel_info(m, &intel_connector->panel); 2922 } 2923 2924 static void intel_hdmi_info(struct seq_file *m, 2925 struct intel_connector *intel_connector) 2926 { 2927 struct intel_encoder *intel_encoder = intel_connector->encoder; 2928 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2929 2930 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); 2931 } 2932 2933 static void intel_lvds_info(struct seq_file *m, 2934 struct intel_connector *intel_connector) 2935 { 2936 intel_panel_info(m, &intel_connector->panel); 2937 } 2938 2939 static void intel_connector_info(struct seq_file *m, 2940 struct drm_connector *connector) 2941 { 2942 struct intel_connector *intel_connector = to_intel_connector(connector); 2943 struct intel_encoder *intel_encoder = intel_connector->encoder; 2944 struct drm_display_mode *mode; 2945 2946 seq_printf(m, "connector %d: type %s, status: %s\n", 2947 connector->base.id, connector->name, 2948 drm_get_connector_status_name(connector->status)); 2949 if (connector->status == connector_status_connected) { 2950 seq_printf(m, "\tname: %s\n", connector->display_info.name); 2951 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2952 connector->display_info.width_mm, 2953 connector->display_info.height_mm); 2954 seq_printf(m, "\tsubpixel order: %s\n", 2955 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2956 seq_printf(m, "\tCEA rev: %d\n", 2957 connector->display_info.cea_rev); 2958 } 2959 if (intel_encoder) { 2960 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2961 intel_encoder->type == INTEL_OUTPUT_EDP) 2962 intel_dp_info(m, intel_connector); 2963 else if (intel_encoder->type == INTEL_OUTPUT_HDMI) 2964 intel_hdmi_info(m, intel_connector); 2965 else if (intel_encoder->type == INTEL_OUTPUT_LVDS) 2966 intel_lvds_info(m, intel_connector); 2967 } 2968 2969 seq_printf(m, "\tmodes:\n"); 2970 list_for_each_entry(mode, &connector->modes, head) 2971 intel_seq_print_mode(m, 2, mode); 2972 } 2973 2974 static bool cursor_active(struct drm_device *dev, int pipe) 2975 { 2976 struct drm_i915_private *dev_priv = 
dev->dev_private; 2977 u32 state; 2978 2979 if (IS_845G(dev) || IS_I865G(dev)) 2980 state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 2981 else 2982 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 2983 2984 return state; 2985 } 2986 2987 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) 2988 { 2989 struct drm_i915_private *dev_priv = dev->dev_private; 2990 u32 pos; 2991 2992 pos = I915_READ(CURPOS(pipe)); 2993 2994 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; 2995 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) 2996 *x = -*x; 2997 2998 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; 2999 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) 3000 *y = -*y; 3001 3002 return cursor_active(dev, pipe); 3003 } 3004 3005 static const char *plane_type(enum drm_plane_type type) 3006 { 3007 switch (type) { 3008 case DRM_PLANE_TYPE_OVERLAY: 3009 return "OVL"; 3010 case DRM_PLANE_TYPE_PRIMARY: 3011 return "PRI"; 3012 case DRM_PLANE_TYPE_CURSOR: 3013 return "CUR"; 3014 /* 3015 * Deliberately omitting default: to generate compiler warnings 3016 * when a new drm_plane_type gets added. 3017 */ 3018 } 3019 3020 return "unknown"; 3021 } 3022 3023 static const char *plane_rotation(unsigned int rotation) 3024 { 3025 static char buf[48]; 3026 /* 3027 * According to the docs only one DRM_ROTATE_ bit is allowed, but 3028 * print them all so that misused values are easy to spot 3029 */ 3030 snprintf(buf, sizeof(buf), 3031 "%s%s%s%s%s%s(0x%08x)", 3032 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "", 3033 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "", 3034 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "", 3035 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "", 3036 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "", 3037 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "", 3038 rotation); 3039 3040 return buf; 3041 } 3042 3043 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3044 { 3045 struct drm_info_node *node = m->private; 3046 struct drm_device *dev = node->minor->dev; 3047 struct intel_plane *intel_plane; 3048 3049 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3050 struct drm_plane_state *state; 3051 struct drm_plane *plane = &intel_plane->base; 3052 3053 if (!plane->state) { 3054 seq_puts(m, "plane->state is NULL!\n"); 3055 continue; 3056 } 3057 3058 state = plane->state; 3059 3060 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", 3061 plane->base.id, 3062 plane_type(intel_plane->base.type), 3063 state->crtc_x, state->crtc_y, 3064 state->crtc_w, state->crtc_h, 3065 (state->src_x >> 16), 3066 ((state->src_x & 0xffff) * 15625) >> 10, 3067 (state->src_y >> 16), 3068 ((state->src_y & 0xffff) * 15625) >> 10, 3069 (state->src_w >> 16), 3070 ((state->src_w & 0xffff) * 15625) >> 10, 3071 (state->src_h >> 16), 3072 ((state->src_h & 0xffff) * 15625) >> 10, 3073 state->fb ? 
drm_get_format_name(state->fb->pixel_format) : "N/A", 3074 plane_rotation(state->rotation)); 3075 } 3076 } 3077 3078 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3079 { 3080 struct intel_crtc_state *pipe_config; 3081 int num_scalers = intel_crtc->num_scalers; 3082 int i; 3083 3084 pipe_config = to_intel_crtc_state(intel_crtc->base.state); 3085 3086 /* Not all platforms have a scaler */ 3087 if (num_scalers) { 3088 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 3089 num_scalers, 3090 pipe_config->scaler_state.scaler_users, 3091 pipe_config->scaler_state.scaler_id); 3092 3093 for (i = 0; i < SKL_NUM_SCALERS; i++) { 3094 struct intel_scaler *sc = 3095 &pipe_config->scaler_state.scalers[i]; 3096 3097 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 3098 i, yesno(sc->in_use), sc->mode); 3099 } 3100 seq_puts(m, "\n"); 3101 } else { 3102 seq_puts(m, "\tNo scalers available on this platform\n"); 3103 } 3104 } 3105 3106 static int i915_display_info(struct seq_file *m, void *unused) 3107 { 3108 struct drm_info_node *node = m->private; 3109 struct drm_device *dev = node->minor->dev; 3110 struct drm_i915_private *dev_priv = dev->dev_private; 3111 struct intel_crtc *crtc; 3112 struct drm_connector *connector; 3113 3114 intel_runtime_pm_get(dev_priv); 3115 drm_modeset_lock_all(dev); 3116 seq_printf(m, "CRTC info\n"); 3117 seq_printf(m, "---------\n"); 3118 for_each_intel_crtc(dev, crtc) { 3119 bool active; 3120 struct intel_crtc_state *pipe_config; 3121 int x, y; 3122 3123 pipe_config = to_intel_crtc_state(crtc->base.state); 3124 3125 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n", 3126 crtc->base.base.id, pipe_name(crtc->pipe), 3127 yesno(pipe_config->base.active), 3128 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 3129 yesno(pipe_config->dither), pipe_config->pipe_bpp); 3130 3131 if (pipe_config->base.active) { 3132 intel_crtc_info(m, crtc); 3133 3134 active = cursor_position(dev, crtc->pipe, &x, &y); 3135 seq_printf(m, "\tcursor visible? 
%s\n", 3136 yesno(crtc->cursor_base), 3137 x, y, crtc->base.cursor->state->crtc_w, 3138 crtc->base.cursor->state->crtc_h, 3139 crtc->cursor_addr, yesno(active)); 3140 intel_scaler_info(m, crtc); 3141 intel_plane_info(m, crtc); 3142 } 3143 3144 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 3145 yesno(!crtc->cpu_fifo_underrun_disabled), 3146 yesno(!crtc->pch_fifo_underrun_disabled)); 3147 } 3148 3149 seq_printf(m, "\n"); 3150 seq_printf(m, "Connector info\n"); 3151 seq_printf(m, "--------------\n"); 3152 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 3153 intel_connector_info(m, connector); 3154 } 3155 drm_modeset_unlock_all(dev); 3156 intel_runtime_pm_put(dev_priv); 3157 3158 return 0; 3159 } 3160 3161 static int i915_semaphore_status(struct seq_file *m, void *unused) 3162 { 3163 struct drm_info_node *node = (struct drm_info_node *) m->private; 3164 struct drm_device *dev = node->minor->dev; 3165 struct drm_i915_private *dev_priv = dev->dev_private; 3166 struct intel_engine_cs *engine; 3167 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 3168 enum intel_engine_id id; 3169 int j, ret; 3170 3171 if (!i915_semaphore_is_enabled(dev)) { 3172 seq_puts(m, "Semaphores are disabled\n"); 3173 return 0; 3174 } 3175 3176 ret = mutex_lock_interruptible(&dev->struct_mutex); 3177 if (ret) 3178 return ret; 3179 intel_runtime_pm_get(dev_priv); 3180 3181 if (IS_BROADWELL(dev)) { 3182 struct page *page; 3183 uint64_t *seqno; 3184 3185 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); 3186 3187 seqno = (uint64_t *)kmap_atomic(page); 3188 for_each_engine_id(engine, dev_priv, id) { 3189 uint64_t offset; 3190 3191 seq_printf(m, "%s\n", engine->name); 3192 3193 seq_puts(m, " Last signal:"); 3194 for (j = 0; j < num_rings; j++) { 3195 offset = id * I915_NUM_ENGINES + j; 3196 seq_printf(m, "0x%08llx (0x%02llx) ", 3197 seqno[offset], offset * 8); 3198 } 3199 seq_putc(m, '\n'); 3200 3201 seq_puts(m, " Last wait: "); 3202 for (j = 0; j < num_rings; j++) { 3203 offset = id + (j * I915_NUM_ENGINES); 3204 seq_printf(m, "0x%08llx (0x%02llx) ", 3205 seqno[offset], offset * 8); 3206 } 3207 seq_putc(m, '\n'); 3208 3209 } 3210 kunmap_atomic(seqno); 3211 } else { 3212 seq_puts(m, " Last signal:"); 3213 for_each_engine(engine, dev_priv) 3214 for (j = 0; j < num_rings; j++) 3215 seq_printf(m, "0x%08x\n", 3216 I915_READ(engine->semaphore.mbox.signal[j])); 3217 seq_putc(m, '\n'); 3218 } 3219 3220 seq_puts(m, "\nSync seqno:\n"); 3221 for_each_engine(engine, dev_priv) { 3222 for (j = 0; j < num_rings; j++) 3223 seq_printf(m, " 0x%08x ", 3224 engine->semaphore.sync_seqno[j]); 3225 seq_putc(m, '\n'); 3226 } 3227 seq_putc(m, '\n'); 3228 3229 intel_runtime_pm_put(dev_priv); 3230 mutex_unlock(&dev->struct_mutex); 3231 return 0; 3232 } 3233 3234 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 3235 { 3236 struct drm_info_node *node = (struct drm_info_node *) m->private; 3237 struct drm_device *dev = node->minor->dev; 3238 struct drm_i915_private *dev_priv = dev->dev_private; 3239 int i; 3240 3241 drm_modeset_lock_all(dev); 3242 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3243 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 3244 3245 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 3246 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", 3247 pll->config.crtc_mask, pll->active_mask, yesno(pll->on)); 3248 seq_printf(m, " tracked hardware state:\n"); 3249 seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll); 3250 
seq_printf(m, " dpll_md: 0x%08x\n", 3251 pll->config.hw_state.dpll_md); 3252 seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0); 3253 seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1); 3254 seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll); 3255 } 3256 drm_modeset_unlock_all(dev); 3257 3258 return 0; 3259 } 3260 3261 static int i915_wa_registers(struct seq_file *m, void *unused) 3262 { 3263 int i; 3264 int ret; 3265 struct intel_engine_cs *engine; 3266 struct drm_info_node *node = (struct drm_info_node *) m->private; 3267 struct drm_device *dev = node->minor->dev; 3268 struct drm_i915_private *dev_priv = dev->dev_private; 3269 struct i915_workarounds *workarounds = &dev_priv->workarounds; 3270 enum intel_engine_id id; 3271 3272 ret = mutex_lock_interruptible(&dev->struct_mutex); 3273 if (ret) 3274 return ret; 3275 3276 intel_runtime_pm_get(dev_priv); 3277 3278 seq_printf(m, "Workarounds applied: %d\n", workarounds->count); 3279 for_each_engine_id(engine, dev_priv, id) 3280 seq_printf(m, "HW whitelist count for %s: %d\n", 3281 engine->name, workarounds->hw_whitelist_count[id]); 3282 for (i = 0; i < workarounds->count; ++i) { 3283 i915_reg_t addr; 3284 u32 mask, value, read; 3285 bool ok; 3286 3287 addr = workarounds->reg[i].addr; 3288 mask = workarounds->reg[i].mask; 3289 value = workarounds->reg[i].value; 3290 read = I915_READ(addr); 3291 ok = (value & mask) == (read & mask); 3292 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 3293 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL"); 3294 } 3295 3296 intel_runtime_pm_put(dev_priv); 3297 mutex_unlock(&dev->struct_mutex); 3298 3299 return 0; 3300 } 3301 3302 static int i915_ddb_info(struct seq_file *m, void *unused) 3303 { 3304 struct drm_info_node *node = m->private; 3305 struct drm_device *dev = node->minor->dev; 3306 struct drm_i915_private *dev_priv = dev->dev_private; 3307 struct skl_ddb_allocation *ddb; 3308 struct skl_ddb_entry *entry; 3309 enum pipe pipe; 3310 int plane; 3311 3312 if (INTEL_INFO(dev)->gen < 9) 3313 return 0; 3314 3315 drm_modeset_lock_all(dev); 3316 3317 ddb = &dev_priv->wm.skl_hw.ddb; 3318 3319 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 3320 3321 for_each_pipe(dev_priv, pipe) { 3322 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 3323 3324 for_each_plane(dev_priv, pipe, plane) { 3325 entry = &ddb->plane[pipe][plane]; 3326 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, 3327 entry->start, entry->end, 3328 skl_ddb_entry_size(entry)); 3329 } 3330 3331 entry = &ddb->plane[pipe][PLANE_CURSOR]; 3332 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3333 entry->end, skl_ddb_entry_size(entry)); 3334 } 3335 3336 drm_modeset_unlock_all(dev); 3337 3338 return 0; 3339 } 3340 3341 static void drrs_status_per_crtc(struct seq_file *m, 3342 struct drm_device *dev, struct intel_crtc *intel_crtc) 3343 { 3344 struct intel_encoder *intel_encoder; 3345 struct drm_i915_private *dev_priv = dev->dev_private; 3346 struct i915_drrs *drrs = &dev_priv->drrs; 3347 int vrefresh = 0; 3348 3349 for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) { 3350 /* Encoder connected on this CRTC */ 3351 switch (intel_encoder->type) { 3352 case INTEL_OUTPUT_EDP: 3353 seq_puts(m, "eDP:\n"); 3354 break; 3355 case INTEL_OUTPUT_DSI: 3356 seq_puts(m, "DSI:\n"); 3357 break; 3358 case INTEL_OUTPUT_HDMI: 3359 seq_puts(m, "HDMI:\n"); 3360 break; 3361 case INTEL_OUTPUT_DISPLAYPORT: 3362 seq_puts(m, "DP:\n"); 3363 break; 3364 default: 3365 seq_printf(m, "Other 
encoder (type=%d).\n", 3366 intel_encoder->type); 3367 return; 3368 } 3369 } 3370 3371 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) 3372 seq_puts(m, "\tVBT: DRRS_type: Static"); 3373 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) 3374 seq_puts(m, "\tVBT: DRRS_type: Seamless"); 3375 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED) 3376 seq_puts(m, "\tVBT: DRRS_type: None"); 3377 else 3378 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); 3379 3380 seq_puts(m, "\n\n"); 3381 3382 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { 3383 struct intel_panel *panel; 3384 3385 mutex_lock(&drrs->mutex); 3386 /* DRRS Supported */ 3387 seq_puts(m, "\tDRRS Supported: Yes\n"); 3388 3389 /* disable_drrs() will make drrs->dp NULL */ 3390 if (!drrs->dp) { 3391 seq_puts(m, "Idleness DRRS: Disabled"); 3392 mutex_unlock(&drrs->mutex); 3393 return; 3394 } 3395 3396 panel = &drrs->dp->attached_connector->panel; 3397 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", 3398 drrs->busy_frontbuffer_bits); 3399 3400 seq_puts(m, "\n\t\t"); 3401 if (drrs->refresh_rate_type == DRRS_HIGH_RR) { 3402 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); 3403 vrefresh = panel->fixed_mode->vrefresh; 3404 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { 3405 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); 3406 vrefresh = panel->downclock_mode->vrefresh; 3407 } else { 3408 seq_printf(m, "DRRS_State: Unknown(%d)\n", 3409 drrs->refresh_rate_type); 3410 mutex_unlock(&drrs->mutex); 3411 return; 3412 } 3413 seq_printf(m, "\t\tVrefresh: %d", vrefresh); 3414 3415 seq_puts(m, "\n\t\t"); 3416 mutex_unlock(&drrs->mutex); 3417 } else { 3418 /* DRRS not supported. Print the VBT parameter */ 3419 seq_puts(m, "\tDRRS Supported: No"); 3420 } 3421 seq_puts(m, "\n"); 3422 } 3423 3424 static int i915_drrs_status(struct seq_file *m, void *unused) 3425 { 3426 struct drm_info_node *node = m->private; 3427 struct drm_device *dev = node->minor->dev; 3428 struct intel_crtc *intel_crtc; 3429 int active_crtc_cnt = 0; 3430 3431 for_each_intel_crtc(dev, intel_crtc) { 3432 drm_modeset_lock(&intel_crtc->base.mutex, NULL); 3433 3434 if (intel_crtc->base.state->active) { 3435 active_crtc_cnt++; 3436 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); 3437 3438 drrs_status_per_crtc(m, dev, intel_crtc); 3439 } 3440 3441 drm_modeset_unlock(&intel_crtc->base.mutex); 3442 } 3443 3444 if (!active_crtc_cnt) 3445 seq_puts(m, "No active CRTC found\n"); 3446 3447 return 0; 3448 } 3449 3450 struct pipe_crc_info { 3451 const char *name; 3452 struct drm_device *dev; 3453 enum pipe pipe; 3454 }; 3455 3456 static int i915_dp_mst_info(struct seq_file *m, void *unused) 3457 { 3458 struct drm_info_node *node = (struct drm_info_node *) m->private; 3459 struct drm_device *dev = node->minor->dev; 3460 struct drm_encoder *encoder; 3461 struct intel_encoder *intel_encoder; 3462 struct intel_digital_port *intel_dig_port; 3463 drm_modeset_lock_all(dev); 3464 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 3465 intel_encoder = to_intel_encoder(encoder); 3466 if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT) 3467 continue; 3468 intel_dig_port = enc_to_dig_port(encoder); 3469 if (!intel_dig_port->dp.can_mst) 3470 continue; 3471 seq_printf(m, "MST Source Port %c\n", 3472 port_name(intel_dig_port->port)); 3473 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 3474 } 3475 drm_modeset_unlock_all(dev); 3476 return 0; 3477 } 3478 3479 static int i915_pipe_crc_open(struct inode *inode, struct file *filep) 3480 { 3481 struct pipe_crc_info 
*info = inode->i_private; 3482 struct drm_i915_private *dev_priv = info->dev->dev_private; 3483 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3484 3485 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) 3486 return -ENODEV; 3487 3488 spin_lock_irq(&pipe_crc->lock); 3489 3490 if (pipe_crc->opened) { 3491 spin_unlock_irq(&pipe_crc->lock); 3492 return -EBUSY; /* already open */ 3493 } 3494 3495 pipe_crc->opened = true; 3496 filep->private_data = inode->i_private; 3497 3498 spin_unlock_irq(&pipe_crc->lock); 3499 3500 return 0; 3501 } 3502 3503 static int i915_pipe_crc_release(struct inode *inode, struct file *filep) 3504 { 3505 struct pipe_crc_info *info = inode->i_private; 3506 struct drm_i915_private *dev_priv = info->dev->dev_private; 3507 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3508 3509 spin_lock_irq(&pipe_crc->lock); 3510 pipe_crc->opened = false; 3511 spin_unlock_irq(&pipe_crc->lock); 3512 3513 return 0; 3514 } 3515 3516 /* (6 fields, 8 chars each, space separated (5) + '\n') */ 3517 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) 3518 /* account for the '\0' terminator */ 3519 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) 3520 3521 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) 3522 { 3523 assert_spin_locked(&pipe_crc->lock); 3524 return CIRC_CNT(pipe_crc->head, pipe_crc->tail, 3525 INTEL_PIPE_CRC_ENTRIES_NR); 3526 } 3527 3528 static ssize_t 3529 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, 3530 loff_t *pos) 3531 { 3532 struct pipe_crc_info *info = filep->private_data; 3533 struct drm_device *dev = info->dev; 3534 struct drm_i915_private *dev_priv = dev->dev_private; 3535 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3536 char buf[PIPE_CRC_BUFFER_LEN]; 3537 int n_entries; 3538 ssize_t bytes_read; 3539 3540 /* 3541 * Don't allow user space to provide buffers not big enough to hold 3542 * a line of data. 
3543 */ 3544 if (count < PIPE_CRC_LINE_LEN) 3545 return -EINVAL; 3546 3547 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) 3548 return 0; 3549 3550 /* nothing to read */ 3551 spin_lock_irq(&pipe_crc->lock); 3552 while (pipe_crc_data_count(pipe_crc) == 0) { 3553 int ret; 3554 3555 if (filep->f_flags & O_NONBLOCK) { 3556 spin_unlock_irq(&pipe_crc->lock); 3557 return -EAGAIN; 3558 } 3559 3560 ret = wait_event_interruptible_lock_irq(pipe_crc->wq, 3561 pipe_crc_data_count(pipe_crc), pipe_crc->lock); 3562 if (ret) { 3563 spin_unlock_irq(&pipe_crc->lock); 3564 return ret; 3565 } 3566 } 3567 3568 /* We now have one or more entries to read */ 3569 n_entries = count / PIPE_CRC_LINE_LEN; 3570 3571 bytes_read = 0; 3572 while (n_entries > 0) { 3573 struct intel_pipe_crc_entry *entry = 3574 &pipe_crc->entries[pipe_crc->tail]; 3575 int ret; 3576 3577 if (CIRC_CNT(pipe_crc->head, pipe_crc->tail, 3578 INTEL_PIPE_CRC_ENTRIES_NR) < 1) 3579 break; 3580 3581 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); 3582 pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 3583 3584 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, 3585 "%8u %8x %8x %8x %8x %8x\n", 3586 entry->frame, entry->crc[0], 3587 entry->crc[1], entry->crc[2], 3588 entry->crc[3], entry->crc[4]); 3589 3590 spin_unlock_irq(&pipe_crc->lock); 3591 3592 ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN); 3593 /* copy_to_user() returns the number of bytes it could not copy; any nonzero value means the line reached user space truncated */ if (ret) 3594 return -EFAULT; 3595 3596 user_buf += PIPE_CRC_LINE_LEN; 3597 n_entries--; 3598 3599 spin_lock_irq(&pipe_crc->lock); 3600 } 3601 3602 spin_unlock_irq(&pipe_crc->lock); 3603 3604 return bytes_read; 3605 } 3606 3607 static const struct file_operations i915_pipe_crc_fops = { 3608 .owner = THIS_MODULE, 3609 .open = i915_pipe_crc_open, 3610 .read = i915_pipe_crc_read, 3611 .release = i915_pipe_crc_release, 3612 }; 3613 3614 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { 3615 { 3616 .name = "i915_pipe_A_crc", 3617 .pipe = PIPE_A, 3618 }, 3619 { 3620 .name = "i915_pipe_B_crc", 3621 .pipe = PIPE_B, 3622 }, 3623 { 3624 .name = "i915_pipe_C_crc", 3625 .pipe = PIPE_C, 3626 }, 3627 }; 3628 3629 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, 3630 enum pipe pipe) 3631 { 3632 struct drm_device *dev = minor->dev; 3633 struct dentry *ent; 3634 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; 3635 3636 info->dev = dev; 3637 ent = debugfs_create_file(info->name, S_IRUGO, root, info, 3638 &i915_pipe_crc_fops); 3639 if (!ent) 3640 return -ENOMEM; 3641 3642 return drm_add_fake_info_node(minor, ent, info); 3643 } 3644 3645 static const char * const pipe_crc_sources[] = { 3646 "none", 3647 "plane1", 3648 "plane2", 3649 "pf", 3650 "pipe", 3651 "TV", 3652 "DP-B", 3653 "DP-C", 3654 "DP-D", 3655 "auto", 3656 }; 3657 3658 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) 3659 { 3660 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); 3661 return pipe_crc_sources[source]; 3662 } 3663 3664 static int display_crc_ctl_show(struct seq_file *m, void *data) 3665 { 3666 struct drm_device *dev = m->private; 3667 struct drm_i915_private *dev_priv = dev->dev_private; 3668 int i; 3669 3670 for (i = 0; i < I915_MAX_PIPES; i++) 3671 seq_printf(m, "%c %s\n", pipe_name(i), 3672 pipe_crc_source_name(dev_priv->pipe_crc[i].source)); 3673 3674 return 0; 3675 } 3676 3677 static int display_crc_ctl_open(struct inode *inode, struct file *file) 3678 { 3679 struct drm_device *dev = inode->i_private; 3680 3681 return 
single_open(file, display_crc_ctl_show, dev); 3682 } 3683 3684 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 3685 uint32_t *val) 3686 { 3687 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3688 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3689 3690 switch (*source) { 3691 case INTEL_PIPE_CRC_SOURCE_PIPE: 3692 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; 3693 break; 3694 case INTEL_PIPE_CRC_SOURCE_NONE: 3695 *val = 0; 3696 break; 3697 default: 3698 return -EINVAL; 3699 } 3700 3701 return 0; 3702 } 3703 3704 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, 3705 enum intel_pipe_crc_source *source) 3706 { 3707 struct intel_encoder *encoder; 3708 struct intel_crtc *crtc; 3709 struct intel_digital_port *dig_port; 3710 int ret = 0; 3711 3712 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3713 3714 drm_modeset_lock_all(dev); 3715 for_each_intel_encoder(dev, encoder) { 3716 if (!encoder->base.crtc) 3717 continue; 3718 3719 crtc = to_intel_crtc(encoder->base.crtc); 3720 3721 if (crtc->pipe != pipe) 3722 continue; 3723 3724 switch (encoder->type) { 3725 case INTEL_OUTPUT_TVOUT: 3726 *source = INTEL_PIPE_CRC_SOURCE_TV; 3727 break; 3728 case INTEL_OUTPUT_DISPLAYPORT: 3729 case INTEL_OUTPUT_EDP: 3730 dig_port = enc_to_dig_port(&encoder->base); 3731 switch (dig_port->port) { 3732 case PORT_B: 3733 *source = INTEL_PIPE_CRC_SOURCE_DP_B; 3734 break; 3735 case PORT_C: 3736 *source = INTEL_PIPE_CRC_SOURCE_DP_C; 3737 break; 3738 case PORT_D: 3739 *source = INTEL_PIPE_CRC_SOURCE_DP_D; 3740 break; 3741 default: 3742 WARN(1, "nonexistent DP port %c\n", 3743 port_name(dig_port->port)); 3744 break; 3745 } 3746 break; 3747 default: 3748 break; 3749 } 3750 } 3751 drm_modeset_unlock_all(dev); 3752 3753 return ret; 3754 } 3755 3756 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, 3757 enum pipe pipe, 3758 enum intel_pipe_crc_source *source, 3759 uint32_t *val) 3760 { 3761 struct drm_i915_private *dev_priv = dev->dev_private; 3762 bool need_stable_symbols = false; 3763 3764 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3765 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 3766 if (ret) 3767 return ret; 3768 } 3769 3770 switch (*source) { 3771 case INTEL_PIPE_CRC_SOURCE_PIPE: 3772 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; 3773 break; 3774 case INTEL_PIPE_CRC_SOURCE_DP_B: 3775 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; 3776 need_stable_symbols = true; 3777 break; 3778 case INTEL_PIPE_CRC_SOURCE_DP_C: 3779 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; 3780 need_stable_symbols = true; 3781 break; 3782 case INTEL_PIPE_CRC_SOURCE_DP_D: 3783 if (!IS_CHERRYVIEW(dev)) 3784 return -EINVAL; 3785 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV; 3786 need_stable_symbols = true; 3787 break; 3788 case INTEL_PIPE_CRC_SOURCE_NONE: 3789 *val = 0; 3790 break; 3791 default: 3792 return -EINVAL; 3793 } 3794 3795 /* 3796 * When the pipe CRC tap point is after the transcoders we need 3797 * to tweak symbol-level features to produce a deterministic series of 3798 * symbols for a given frame. 
We need to reset those features only once 3799 * a frame (instead of every nth symbol): 3800 * - DC-balance: used to ensure a better clock recovery from the data 3801 * link (SDVO) 3802 * - DisplayPort scrambling: used for EMI reduction 3803 */ 3804 if (need_stable_symbols) { 3805 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3806 3807 tmp |= DC_BALANCE_RESET_VLV; 3808 switch (pipe) { 3809 case PIPE_A: 3810 tmp |= PIPE_A_SCRAMBLE_RESET; 3811 break; 3812 case PIPE_B: 3813 tmp |= PIPE_B_SCRAMBLE_RESET; 3814 break; 3815 case PIPE_C: 3816 tmp |= PIPE_C_SCRAMBLE_RESET; 3817 break; 3818 default: 3819 return -EINVAL; 3820 } 3821 I915_WRITE(PORT_DFT2_G4X, tmp); 3822 } 3823 3824 return 0; 3825 } 3826 3827 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, 3828 enum pipe pipe, 3829 enum intel_pipe_crc_source *source, 3830 uint32_t *val) 3831 { 3832 struct drm_i915_private *dev_priv = dev->dev_private; 3833 bool need_stable_symbols = false; 3834 3835 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3836 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 3837 if (ret) 3838 return ret; 3839 } 3840 3841 switch (*source) { 3842 case INTEL_PIPE_CRC_SOURCE_PIPE: 3843 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; 3844 break; 3845 case INTEL_PIPE_CRC_SOURCE_TV: 3846 if (!SUPPORTS_TV(dev)) 3847 return -EINVAL; 3848 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; 3849 break; 3850 case INTEL_PIPE_CRC_SOURCE_DP_B: 3851 if (!IS_G4X(dev)) 3852 return -EINVAL; 3853 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; 3854 need_stable_symbols = true; 3855 break; 3856 case INTEL_PIPE_CRC_SOURCE_DP_C: 3857 if (!IS_G4X(dev)) 3858 return -EINVAL; 3859 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; 3860 need_stable_symbols = true; 3861 break; 3862 case INTEL_PIPE_CRC_SOURCE_DP_D: 3863 if (!IS_G4X(dev)) 3864 return -EINVAL; 3865 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; 3866 need_stable_symbols = true; 3867 break; 3868 case INTEL_PIPE_CRC_SOURCE_NONE: 3869 *val = 0; 3870 break; 3871 default: 3872 return -EINVAL; 3873 } 3874 3875 /* 3876 * When the pipe CRC tap point is after the transcoders we need 3877 * to tweak symbol-level features to produce a deterministic series of 3878 * symbols for a given frame. 
We need to reset those features only once 3879 * a frame (instead of every nth symbol): 3880 * - DC-balance: used to ensure a better clock recovery from the data 3881 * link (SDVO) 3882 * - DisplayPort scrambling: used for EMI reduction 3883 */ 3884 if (need_stable_symbols) { 3885 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3886 3887 WARN_ON(!IS_G4X(dev)); 3888 3889 I915_WRITE(PORT_DFT_I9XX, 3890 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); 3891 3892 if (pipe == PIPE_A) 3893 tmp |= PIPE_A_SCRAMBLE_RESET; 3894 else 3895 tmp |= PIPE_B_SCRAMBLE_RESET; 3896 3897 I915_WRITE(PORT_DFT2_G4X, tmp); 3898 } 3899 3900 return 0; 3901 } 3902 3903 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, 3904 enum pipe pipe) 3905 { 3906 struct drm_i915_private *dev_priv = dev->dev_private; 3907 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3908 3909 switch (pipe) { 3910 case PIPE_A: 3911 tmp &= ~PIPE_A_SCRAMBLE_RESET; 3912 break; 3913 case PIPE_B: 3914 tmp &= ~PIPE_B_SCRAMBLE_RESET; 3915 break; 3916 case PIPE_C: 3917 tmp &= ~PIPE_C_SCRAMBLE_RESET; 3918 break; 3919 default: 3920 return; 3921 } 3922 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) 3923 tmp &= ~DC_BALANCE_RESET_VLV; 3924 I915_WRITE(PORT_DFT2_G4X, tmp); 3925 3926 } 3927 3928 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, 3929 enum pipe pipe) 3930 { 3931 struct drm_i915_private *dev_priv = dev->dev_private; 3932 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3933 3934 if (pipe == PIPE_A) 3935 tmp &= ~PIPE_A_SCRAMBLE_RESET; 3936 else 3937 tmp &= ~PIPE_B_SCRAMBLE_RESET; 3938 I915_WRITE(PORT_DFT2_G4X, tmp); 3939 3940 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { 3941 I915_WRITE(PORT_DFT_I9XX, 3942 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); 3943 } 3944 } 3945 3946 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 3947 uint32_t *val) 3948 { 3949 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3950 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3951 3952 switch (*source) { 3953 case INTEL_PIPE_CRC_SOURCE_PLANE1: 3954 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; 3955 break; 3956 case INTEL_PIPE_CRC_SOURCE_PLANE2: 3957 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; 3958 break; 3959 case INTEL_PIPE_CRC_SOURCE_PIPE: 3960 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; 3961 break; 3962 case INTEL_PIPE_CRC_SOURCE_NONE: 3963 *val = 0; 3964 break; 3965 default: 3966 return -EINVAL; 3967 } 3968 3969 return 0; 3970 } 3971 3972 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable) 3973 { 3974 struct drm_i915_private *dev_priv = dev->dev_private; 3975 struct intel_crtc *crtc = 3976 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); 3977 struct intel_crtc_state *pipe_config; 3978 struct drm_atomic_state *state; 3979 int ret = 0; 3980 3981 drm_modeset_lock_all(dev); 3982 state = drm_atomic_state_alloc(dev); 3983 if (!state) { 3984 ret = -ENOMEM; 3985 goto out; 3986 } 3987 3988 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base); 3989 pipe_config = intel_atomic_get_crtc_state(state, crtc); 3990 if (IS_ERR(pipe_config)) { 3991 ret = PTR_ERR(pipe_config); 3992 goto out; 3993 } 3994 3995 pipe_config->pch_pfit.force_thru = enable; 3996 if (pipe_config->cpu_transcoder == TRANSCODER_EDP && 3997 pipe_config->pch_pfit.enabled != enable) 3998 pipe_config->base.connectors_changed = true; 3999 4000 ret = drm_atomic_commit(state); 4001 out: 4002 drm_modeset_unlock_all(dev); 4003 WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret); 4004 if (ret) 4005 drm_atomic_state_free(state); 4006 } 4007 4008 
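/*
 * Editor's note: an illustrative user-space sketch, not part of the
 * driver. The debugfs mount point (/sys/kernel/debug) and the DRM minor
 * number (0) are setup-specific assumptions, and error handling is kept
 * minimal. The command strings follow the grammar documented just above
 * display_crc_ctl_tokenize() later in this file; the record read back is
 * produced by the snprintf() in i915_pipe_crc_read() above. Note that
 * reads smaller than one full record are rejected with -EINVAL, so the
 * buffer must cover at least one line.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		static const char start[] = "pipe A plane1";
 *		static const char stop[] = "pipe A none";
 *		char line[64];
 *		ssize_t n;
 *		int ctl, crc;
 *
 *		ctl = open("/sys/kernel/debug/dri/0/i915_display_crc_ctl",
 *			   O_WRONLY);
 *		if (ctl < 0)
 *			return 1;
 *		write(ctl, start, sizeof(start) - 1);
 *
 *		crc = open("/sys/kernel/debug/dri/0/i915_pipe_A_crc",
 *			   O_RDONLY);
 *		if (crc >= 0) {
 *			n = read(crc, line, sizeof(line) - 1);
 *			if (n > 0) {
 *				line[n] = '\0';
 *				printf("%s", line);
 *			}
 *			close(crc);
 *		}
 *
 *		write(ctl, stop, sizeof(stop) - 1);
 *		close(ctl);
 *		return 0;
 *	}
 */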
static int ivb_pipe_crc_ctl_reg(struct drm_device *dev, 4009 enum pipe pipe, 4010 enum intel_pipe_crc_source *source, 4011 uint32_t *val) 4012 { 4013 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 4014 *source = INTEL_PIPE_CRC_SOURCE_PF; 4015 4016 switch (*source) { 4017 case INTEL_PIPE_CRC_SOURCE_PLANE1: 4018 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; 4019 break; 4020 case INTEL_PIPE_CRC_SOURCE_PLANE2: 4021 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; 4022 break; 4023 case INTEL_PIPE_CRC_SOURCE_PF: 4024 if (IS_HASWELL(dev) && pipe == PIPE_A) 4025 hsw_trans_edp_pipe_A_crc_wa(dev, true); 4026 4027 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; 4028 break; 4029 case INTEL_PIPE_CRC_SOURCE_NONE: 4030 *val = 0; 4031 break; 4032 default: 4033 return -EINVAL; 4034 } 4035 4036 return 0; 4037 } 4038 4039 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, 4040 enum intel_pipe_crc_source source) 4041 { 4042 struct drm_i915_private *dev_priv = dev->dev_private; 4043 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 4044 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, 4045 pipe)); 4046 enum intel_display_power_domain power_domain; 4047 u32 val = 0; /* shut up gcc */ 4048 int ret; 4049 4050 if (pipe_crc->source == source) 4051 return 0; 4052 4053 /* forbid changing the source without going back to 'none' */ 4054 if (pipe_crc->source && source) 4055 return -EINVAL; 4056 4057 power_domain = POWER_DOMAIN_PIPE(pipe); 4058 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) { 4059 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); 4060 return -EIO; 4061 } 4062 4063 if (IS_GEN2(dev)) 4064 ret = i8xx_pipe_crc_ctl_reg(&source, &val); 4065 else if (INTEL_INFO(dev)->gen < 5) 4066 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4067 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4068 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4069 else if (IS_GEN5(dev) || IS_GEN6(dev)) 4070 ret = ilk_pipe_crc_ctl_reg(&source, &val); 4071 else 4072 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4073 4074 if (ret != 0) 4075 goto out; 4076 4077 /* none -> real source transition */ 4078 if (source) { 4079 struct intel_pipe_crc_entry *entries; 4080 4081 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", 4082 pipe_name(pipe), pipe_crc_source_name(source)); 4083 4084 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, 4085 sizeof(pipe_crc->entries[0]), 4086 GFP_KERNEL); 4087 if (!entries) { 4088 ret = -ENOMEM; 4089 goto out; 4090 } 4091 4092 /* 4093 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 4094 * enabled and disabled dynamically based on package C states, 4095 * user space can't make reliable use of the CRCs, so let's just 4096 * completely disable it. 
4097 */ 4098 hsw_disable_ips(crtc); 4099 4100 spin_lock_irq(&pipe_crc->lock); 4101 kfree(pipe_crc->entries); 4102 pipe_crc->entries = entries; 4103 pipe_crc->head = 0; 4104 pipe_crc->tail = 0; 4105 spin_unlock_irq(&pipe_crc->lock); 4106 } 4107 4108 pipe_crc->source = source; 4109 4110 I915_WRITE(PIPE_CRC_CTL(pipe), val); 4111 POSTING_READ(PIPE_CRC_CTL(pipe)); 4112 4113 /* real source -> none transition */ 4114 if (source == INTEL_PIPE_CRC_SOURCE_NONE) { 4115 struct intel_pipe_crc_entry *entries; 4116 struct intel_crtc *crtc = 4117 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 4118 4119 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", 4120 pipe_name(pipe)); 4121 4122 drm_modeset_lock(&crtc->base.mutex, NULL); 4123 if (crtc->base.state->active) 4124 intel_wait_for_vblank(dev, pipe); 4125 drm_modeset_unlock(&crtc->base.mutex); 4126 4127 spin_lock_irq(&pipe_crc->lock); 4128 entries = pipe_crc->entries; 4129 pipe_crc->entries = NULL; 4130 pipe_crc->head = 0; 4131 pipe_crc->tail = 0; 4132 spin_unlock_irq(&pipe_crc->lock); 4133 4134 kfree(entries); 4135 4136 if (IS_G4X(dev)) 4137 g4x_undo_pipe_scramble_reset(dev, pipe); 4138 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4139 vlv_undo_pipe_scramble_reset(dev, pipe); 4140 else if (IS_HASWELL(dev) && pipe == PIPE_A) 4141 hsw_trans_edp_pipe_A_crc_wa(dev, false); 4142 4143 hsw_enable_ips(crtc); 4144 } 4145 4146 ret = 0; 4147 4148 out: 4149 intel_display_power_put(dev_priv, power_domain); 4150 4151 return ret; 4152 } 4153 4154 /* 4155 * Parse pipe CRC command strings: 4156 * command: wsp* object wsp+ name wsp+ source wsp* 4157 * object: 'pipe' 4158 * name: (A | B | C) 4159 * source: (none | plane1 | plane2 | pf | pipe | TV | DP-B | DP-C | DP-D | auto) 4160 * wsp: (#0x20 | #0x9 | #0xA)+ 4161 * 4162 * e.g.: 4163 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A 4164 * "pipe A none" -> Stop CRC 4165 */ 4166 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) 4167 { 4168 int n_words = 0; 4169 4170 while (*buf) { 4171 char *end; 4172 4173 /* skip leading white space */ 4174 buf = skip_spaces(buf); 4175 if (!*buf) 4176 break; /* end of buffer */ 4177 4178 /* find end of word */ 4179 for (end = buf; *end && !isspace(*end); end++) 4180 ; 4181 4182 if (n_words == max_words) { 4183 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", 4184 max_words); 4185 return -EINVAL; /* ran out of words[] before bytes */ 4186 } 4187 4188 if (*end) 4189 *end++ = '\0'; 4190 words[n_words++] = buf; 4191 buf = end; 4192 } 4193 4194 return n_words; 4195 } 4196 4197 enum intel_pipe_crc_object { 4198 PIPE_CRC_OBJECT_PIPE, 4199 }; 4200 4201 static const char * const pipe_crc_objects[] = { 4202 "pipe", 4203 }; 4204 4205 static int 4206 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) 4207 { 4208 int i; 4209 4210 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) 4211 if (!strcmp(buf, pipe_crc_objects[i])) { 4212 *o = i; 4213 return 0; 4214 } 4215 4216 return -EINVAL; 4217 } 4218 4219 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) 4220 { 4221 const char name = buf[0]; 4222 4223 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) 4224 return -EINVAL; 4225 4226 *pipe = name - 'A'; 4227 4228 return 0; 4229 } 4230 4231 static int 4232 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) 4233 { 4234 int i; 4235 4236 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) 4237 if (!strcmp(buf, pipe_crc_sources[i])) { 4238 *s = i; 4239 return 0; 4240 } 4241 4242 return -EINVAL; 4243 } 4244 4245 static int
display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len) 4246 { 4247 #define N_WORDS 3 4248 int n_words; 4249 char *words[N_WORDS]; 4250 enum pipe pipe; 4251 enum intel_pipe_crc_object object; 4252 enum intel_pipe_crc_source source; 4253 4254 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); 4255 if (n_words != N_WORDS) { 4256 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", 4257 N_WORDS); 4258 return -EINVAL; 4259 } 4260 4261 if (display_crc_ctl_parse_object(words[0], &object) < 0) { 4262 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); 4263 return -EINVAL; 4264 } 4265 4266 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { 4267 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); 4268 return -EINVAL; 4269 } 4270 4271 if (display_crc_ctl_parse_source(words[2], &source) < 0) { 4272 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); 4273 return -EINVAL; 4274 } 4275 4276 return pipe_crc_set_source(dev, pipe, source); 4277 } 4278 4279 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, 4280 size_t len, loff_t *offp) 4281 { 4282 struct seq_file *m = file->private_data; 4283 struct drm_device *dev = m->private; 4284 char *tmpbuf; 4285 int ret; 4286 4287 if (len == 0) 4288 return 0; 4289 4290 if (len > PAGE_SIZE - 1) { 4291 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", 4292 PAGE_SIZE); 4293 return -E2BIG; 4294 } 4295 4296 tmpbuf = kmalloc(len + 1, GFP_KERNEL); 4297 if (!tmpbuf) 4298 return -ENOMEM; 4299 4300 if (copy_from_user(tmpbuf, ubuf, len)) { 4301 ret = -EFAULT; 4302 goto out; 4303 } 4304 tmpbuf[len] = '\0'; 4305 4306 ret = display_crc_ctl_parse(dev, tmpbuf, len); 4307 4308 out: 4309 kfree(tmpbuf); 4310 if (ret < 0) 4311 return ret; 4312 4313 *offp += len; 4314 return len; 4315 } 4316 4317 static const struct file_operations i915_display_crc_ctl_fops = { 4318 .owner = THIS_MODULE, 4319 .open = display_crc_ctl_open, 4320 .read = seq_read, 4321 .llseek = seq_lseek, 4322 .release = single_release, 4323 .write = display_crc_ctl_write 4324 }; 4325 4326 static ssize_t i915_displayport_test_active_write(struct file *file, 4327 const char __user *ubuf, 4328 size_t len, loff_t *offp) 4329 { 4330 char *input_buffer; 4331 int status = 0; 4332 struct drm_device *dev; 4333 struct drm_connector *connector; 4334 struct list_head *connector_list; 4335 struct intel_dp *intel_dp; 4336 int val = 0; 4337 4338 dev = ((struct seq_file *)file->private_data)->private; 4339 4340 connector_list = &dev->mode_config.connector_list; 4341 4342 if (len == 0) 4343 return 0; 4344 4345 input_buffer = kmalloc(len + 1, GFP_KERNEL); 4346 if (!input_buffer) 4347 return -ENOMEM; 4348 4349 if (copy_from_user(input_buffer, ubuf, len)) { 4350 status = -EFAULT; 4351 goto out; 4352 } 4353 4354 input_buffer[len] = '\0'; 4355 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); 4356 4357 list_for_each_entry(connector, connector_list, head) { 4358 4359 if (connector->connector_type != 4360 DRM_MODE_CONNECTOR_DisplayPort) 4361 continue; 4362 4363 if (connector->status == connector_status_connected && 4364 connector->encoder != NULL) { 4365 intel_dp = enc_to_intel_dp(connector->encoder); 4366 status = kstrtoint(input_buffer, 10, &val); 4367 if (status < 0) 4368 goto out; 4369 DRM_DEBUG_DRIVER("Got %d for test active\n", val); 4370 /* To prevent erroneous activation of the compliance 4371 * testing code, only accept an actual value of 1 here 4372 */ 4373 if (val == 1) 4374 intel_dp->compliance_test_active = 1; 4375 else 4376 
intel_dp->compliance_test_active = 0; 4377 } 4378 } 4379 out: 4380 kfree(input_buffer); 4381 if (status < 0) 4382 return status; 4383 4384 *offp += len; 4385 return len; 4386 } 4387 4388 static int i915_displayport_test_active_show(struct seq_file *m, void *data) 4389 { 4390 struct drm_device *dev = m->private; 4391 struct drm_connector *connector; 4392 struct list_head *connector_list = &dev->mode_config.connector_list; 4393 struct intel_dp *intel_dp; 4394 4395 list_for_each_entry(connector, connector_list, head) { 4396 4397 if (connector->connector_type != 4398 DRM_MODE_CONNECTOR_DisplayPort) 4399 continue; 4400 4401 if (connector->status == connector_status_connected && 4402 connector->encoder != NULL) { 4403 intel_dp = enc_to_intel_dp(connector->encoder); 4404 if (intel_dp->compliance_test_active) 4405 seq_puts(m, "1"); 4406 else 4407 seq_puts(m, "0"); 4408 } else 4409 seq_puts(m, "0"); 4410 } 4411 4412 return 0; 4413 } 4414 4415 static int i915_displayport_test_active_open(struct inode *inode, 4416 struct file *file) 4417 { 4418 struct drm_device *dev = inode->i_private; 4419 4420 return single_open(file, i915_displayport_test_active_show, dev); 4421 } 4422 4423 static const struct file_operations i915_displayport_test_active_fops = { 4424 .owner = THIS_MODULE, 4425 .open = i915_displayport_test_active_open, 4426 .read = seq_read, 4427 .llseek = seq_lseek, 4428 .release = single_release, 4429 .write = i915_displayport_test_active_write 4430 }; 4431 4432 static int i915_displayport_test_data_show(struct seq_file *m, void *data) 4433 { 4434 struct drm_device *dev = m->private; 4435 struct drm_connector *connector; 4436 struct list_head *connector_list = &dev->mode_config.connector_list; 4437 struct intel_dp *intel_dp; 4438 4439 list_for_each_entry(connector, connector_list, head) { 4440 4441 if (connector->connector_type != 4442 DRM_MODE_CONNECTOR_DisplayPort) 4443 continue; 4444 4445 if (connector->status == connector_status_connected && 4446 connector->encoder != NULL) { 4447 intel_dp = enc_to_intel_dp(connector->encoder); 4448 seq_printf(m, "%lx", intel_dp->compliance_test_data); 4449 } else 4450 seq_puts(m, "0"); 4451 } 4452 4453 return 0; 4454 } 4455 static int i915_displayport_test_data_open(struct inode *inode, 4456 struct file *file) 4457 { 4458 struct drm_device *dev = inode->i_private; 4459 4460 return single_open(file, i915_displayport_test_data_show, dev); 4461 } 4462 4463 static const struct file_operations i915_displayport_test_data_fops = { 4464 .owner = THIS_MODULE, 4465 .open = i915_displayport_test_data_open, 4466 .read = seq_read, 4467 .llseek = seq_lseek, 4468 .release = single_release 4469 }; 4470 4471 static int i915_displayport_test_type_show(struct seq_file *m, void *data) 4472 { 4473 struct drm_device *dev = m->private; 4474 struct drm_connector *connector; 4475 struct list_head *connector_list = &dev->mode_config.connector_list; 4476 struct intel_dp *intel_dp; 4477 4478 list_for_each_entry(connector, connector_list, head) { 4479 4480 if (connector->connector_type != 4481 DRM_MODE_CONNECTOR_DisplayPort) 4482 continue; 4483 4484 if (connector->status == connector_status_connected && 4485 connector->encoder != NULL) { 4486 intel_dp = enc_to_intel_dp(connector->encoder); 4487 seq_printf(m, "%02lx", intel_dp->compliance_test_type); 4488 } else 4489 seq_puts(m, "0"); 4490 } 4491 4492 return 0; 4493 } 4494 4495 static int i915_displayport_test_type_open(struct inode *inode, 4496 struct file *file) 4497 { 4498 struct drm_device *dev = inode->i_private; 4499 4500 return 
single_open(file, i915_displayport_test_type_show, dev); 4501 } 4502 4503 static const struct file_operations i915_displayport_test_type_fops = { 4504 .owner = THIS_MODULE, 4505 .open = i915_displayport_test_type_open, 4506 .read = seq_read, 4507 .llseek = seq_lseek, 4508 .release = single_release 4509 }; 4510 4511 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) 4512 { 4513 struct drm_device *dev = m->private; 4514 int level; 4515 int num_levels; 4516 4517 if (IS_CHERRYVIEW(dev)) 4518 num_levels = 3; 4519 else if (IS_VALLEYVIEW(dev)) 4520 num_levels = 1; 4521 else 4522 num_levels = ilk_wm_max_level(dev) + 1; 4523 4524 drm_modeset_lock_all(dev); 4525 4526 for (level = 0; level < num_levels; level++) { 4527 unsigned int latency = wm[level]; 4528 4529 /* 4530 * - WM1+ latency values in 0.5us units 4531 * - latencies are in us on gen9/vlv/chv 4532 */ 4533 if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) || 4534 IS_CHERRYVIEW(dev)) 4535 latency *= 10; 4536 else if (level > 0) 4537 latency *= 5; 4538 4539 seq_printf(m, "WM%d %u (%u.%u usec)\n", 4540 level, wm[level], latency / 10, latency % 10); 4541 } 4542 4543 drm_modeset_unlock_all(dev); 4544 } 4545 4546 static int pri_wm_latency_show(struct seq_file *m, void *data) 4547 { 4548 struct drm_device *dev = m->private; 4549 struct drm_i915_private *dev_priv = dev->dev_private; 4550 const uint16_t *latencies; 4551 4552 if (INTEL_INFO(dev)->gen >= 9) 4553 latencies = dev_priv->wm.skl_latency; 4554 else 4555 latencies = to_i915(dev)->wm.pri_latency; 4556 4557 wm_latency_show(m, latencies); 4558 4559 return 0; 4560 } 4561 4562 static int spr_wm_latency_show(struct seq_file *m, void *data) 4563 { 4564 struct drm_device *dev = m->private; 4565 struct drm_i915_private *dev_priv = dev->dev_private; 4566 const uint16_t *latencies; 4567 4568 if (INTEL_INFO(dev)->gen >= 9) 4569 latencies = dev_priv->wm.skl_latency; 4570 else 4571 latencies = to_i915(dev)->wm.spr_latency; 4572 4573 wm_latency_show(m, latencies); 4574 4575 return 0; 4576 } 4577 4578 static int cur_wm_latency_show(struct seq_file *m, void *data) 4579 { 4580 struct drm_device *dev = m->private; 4581 struct drm_i915_private *dev_priv = dev->dev_private; 4582 const uint16_t *latencies; 4583 4584 if (INTEL_INFO(dev)->gen >= 9) 4585 latencies = dev_priv->wm.skl_latency; 4586 else 4587 latencies = to_i915(dev)->wm.cur_latency; 4588 4589 wm_latency_show(m, latencies); 4590 4591 return 0; 4592 } 4593 4594 static int pri_wm_latency_open(struct inode *inode, struct file *file) 4595 { 4596 struct drm_device *dev = inode->i_private; 4597 4598 if (INTEL_INFO(dev)->gen < 5) 4599 return -ENODEV; 4600 4601 return single_open(file, pri_wm_latency_show, dev); 4602 } 4603 4604 static int spr_wm_latency_open(struct inode *inode, struct file *file) 4605 { 4606 struct drm_device *dev = inode->i_private; 4607 4608 if (HAS_GMCH_DISPLAY(dev)) 4609 return -ENODEV; 4610 4611 return single_open(file, spr_wm_latency_show, dev); 4612 } 4613 4614 static int cur_wm_latency_open(struct inode *inode, struct file *file) 4615 { 4616 struct drm_device *dev = inode->i_private; 4617 4618 if (HAS_GMCH_DISPLAY(dev)) 4619 return -ENODEV; 4620 4621 return single_open(file, cur_wm_latency_show, dev); 4622 } 4623 4624 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 4625 size_t len, loff_t *offp, uint16_t wm[8]) 4626 { 4627 struct seq_file *m = file->private_data; 4628 struct drm_device *dev = m->private; 4629 uint16_t new[8] = { 0 }; 4630 int num_levels; 4631 int level; 4632 int ret; 
4633 char tmp[32]; 4634 4635 if (IS_CHERRYVIEW(dev)) 4636 num_levels = 3; 4637 else if (IS_VALLEYVIEW(dev)) 4638 num_levels = 1; 4639 else 4640 num_levels = ilk_wm_max_level(dev) + 1; 4641 4642 if (len >= sizeof(tmp)) 4643 return -EINVAL; 4644 4645 if (copy_from_user(tmp, ubuf, len)) 4646 return -EFAULT; 4647 4648 tmp[len] = '\0'; 4649 4650 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 4651 &new[0], &new[1], &new[2], &new[3], 4652 &new[4], &new[5], &new[6], &new[7]); 4653 if (ret != num_levels) 4654 return -EINVAL; 4655 4656 drm_modeset_lock_all(dev); 4657 4658 for (level = 0; level < num_levels; level++) 4659 wm[level] = new[level]; 4660 4661 drm_modeset_unlock_all(dev); 4662 4663 return len; 4664 } 4665 4666 4667 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 4668 size_t len, loff_t *offp) 4669 { 4670 struct seq_file *m = file->private_data; 4671 struct drm_device *dev = m->private; 4672 struct drm_i915_private *dev_priv = dev->dev_private; 4673 uint16_t *latencies; 4674 4675 if (INTEL_INFO(dev)->gen >= 9) 4676 latencies = dev_priv->wm.skl_latency; 4677 else 4678 latencies = to_i915(dev)->wm.pri_latency; 4679 4680 return wm_latency_write(file, ubuf, len, offp, latencies); 4681 } 4682 4683 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 4684 size_t len, loff_t *offp) 4685 { 4686 struct seq_file *m = file->private_data; 4687 struct drm_device *dev = m->private; 4688 struct drm_i915_private *dev_priv = dev->dev_private; 4689 uint16_t *latencies; 4690 4691 if (INTEL_INFO(dev)->gen >= 9) 4692 latencies = dev_priv->wm.skl_latency; 4693 else 4694 latencies = to_i915(dev)->wm.spr_latency; 4695 4696 return wm_latency_write(file, ubuf, len, offp, latencies); 4697 } 4698 4699 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 4700 size_t len, loff_t *offp) 4701 { 4702 struct seq_file *m = file->private_data; 4703 struct drm_device *dev = m->private; 4704 struct drm_i915_private *dev_priv = dev->dev_private; 4705 uint16_t *latencies; 4706 4707 if (INTEL_INFO(dev)->gen >= 9) 4708 latencies = dev_priv->wm.skl_latency; 4709 else 4710 latencies = to_i915(dev)->wm.cur_latency; 4711 4712 return wm_latency_write(file, ubuf, len, offp, latencies); 4713 } 4714 4715 static const struct file_operations i915_pri_wm_latency_fops = { 4716 .owner = THIS_MODULE, 4717 .open = pri_wm_latency_open, 4718 .read = seq_read, 4719 .llseek = seq_lseek, 4720 .release = single_release, 4721 .write = pri_wm_latency_write 4722 }; 4723 4724 static const struct file_operations i915_spr_wm_latency_fops = { 4725 .owner = THIS_MODULE, 4726 .open = spr_wm_latency_open, 4727 .read = seq_read, 4728 .llseek = seq_lseek, 4729 .release = single_release, 4730 .write = spr_wm_latency_write 4731 }; 4732 4733 static const struct file_operations i915_cur_wm_latency_fops = { 4734 .owner = THIS_MODULE, 4735 .open = cur_wm_latency_open, 4736 .read = seq_read, 4737 .llseek = seq_lseek, 4738 .release = single_release, 4739 .write = cur_wm_latency_write 4740 }; 4741 4742 static int 4743 i915_wedged_get(void *data, u64 *val) 4744 { 4745 struct drm_device *dev = data; 4746 struct drm_i915_private *dev_priv = dev->dev_private; 4747 4748 *val = i915_terminally_wedged(&dev_priv->gpu_error); 4749 4750 return 0; 4751 } 4752 4753 static int 4754 i915_wedged_set(void *data, u64 val) 4755 { 4756 struct drm_device *dev = data; 4757 struct drm_i915_private *dev_priv = dev->dev_private; 4758 4759 /* 4760 * There is no safeguard against this debugfs entry 
colliding 4761 * with the hangcheck calling the same i915_handle_error() in 4762 * parallel, causing an explosion. For now we assume that the 4763 * test harness is responsible enough not to inject gpu hangs 4764 * while it is writing to 'i915_wedged'. 4765 */ 4766 4767 if (i915_reset_in_progress(&dev_priv->gpu_error)) 4768 return -EAGAIN; 4769 4770 intel_runtime_pm_get(dev_priv); 4771 4772 i915_handle_error(dev, val, 4773 "Manually setting wedged to %llu", val); 4774 4775 intel_runtime_pm_put(dev_priv); 4776 4777 return 0; 4778 } 4779 4780 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 4781 i915_wedged_get, i915_wedged_set, 4782 "%llu\n"); 4783 4784 static int 4785 i915_ring_stop_get(void *data, u64 *val) 4786 { 4787 struct drm_device *dev = data; 4788 struct drm_i915_private *dev_priv = dev->dev_private; 4789 4790 *val = dev_priv->gpu_error.stop_rings; 4791 4792 return 0; 4793 } 4794 4795 static int 4796 i915_ring_stop_set(void *data, u64 val) 4797 { 4798 struct drm_device *dev = data; 4799 struct drm_i915_private *dev_priv = dev->dev_private; 4800 int ret; 4801 4802 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 4803 4804 ret = mutex_lock_interruptible(&dev->struct_mutex); 4805 if (ret) 4806 return ret; 4807 4808 dev_priv->gpu_error.stop_rings = val; 4809 mutex_unlock(&dev->struct_mutex); 4810 4811 return 0; 4812 } 4813 4814 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 4815 i915_ring_stop_get, i915_ring_stop_set, 4816 "0x%08llx\n"); 4817 4818 static int 4819 i915_ring_missed_irq_get(void *data, u64 *val) 4820 { 4821 struct drm_device *dev = data; 4822 struct drm_i915_private *dev_priv = dev->dev_private; 4823 4824 *val = dev_priv->gpu_error.missed_irq_rings; 4825 return 0; 4826 } 4827 4828 static int 4829 i915_ring_missed_irq_set(void *data, u64 val) 4830 { 4831 struct drm_device *dev = data; 4832 struct drm_i915_private *dev_priv = dev->dev_private; 4833 int ret; 4834 4835 /* Lock against concurrent debugfs callers */ 4836 ret = mutex_lock_interruptible(&dev->struct_mutex); 4837 if (ret) 4838 return ret; 4839 dev_priv->gpu_error.missed_irq_rings = val; 4840 mutex_unlock(&dev->struct_mutex); 4841 4842 return 0; 4843 } 4844 4845 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 4846 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 4847 "0x%08llx\n"); 4848 4849 static int 4850 i915_ring_test_irq_get(void *data, u64 *val) 4851 { 4852 struct drm_device *dev = data; 4853 struct drm_i915_private *dev_priv = dev->dev_private; 4854 4855 *val = dev_priv->gpu_error.test_irq_rings; 4856 4857 return 0; 4858 } 4859 4860 static int 4861 i915_ring_test_irq_set(void *data, u64 val) 4862 { 4863 struct drm_device *dev = data; 4864 struct drm_i915_private *dev_priv = dev->dev_private; 4865 int ret; 4866 4867 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 4868 4869 /* Lock against concurrent debugfs callers */ 4870 ret = mutex_lock_interruptible(&dev->struct_mutex); 4871 if (ret) 4872 return ret; 4873 4874 dev_priv->gpu_error.test_irq_rings = val; 4875 mutex_unlock(&dev->struct_mutex); 4876 4877 return 0; 4878 } 4879 4880 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 4881 i915_ring_test_irq_get, i915_ring_test_irq_set, 4882 "0x%08llx\n"); 4883 4884 #define DROP_UNBOUND 0x1 4885 #define DROP_BOUND 0x2 4886 #define DROP_RETIRE 0x4 4887 #define DROP_ACTIVE 0x8 4888 #define DROP_ALL (DROP_UNBOUND | \ 4889 DROP_BOUND | \ 4890 DROP_RETIRE | \ 4891 DROP_ACTIVE) 4892 static int 4893 i915_drop_caches_get(void *data, u64 *val) 4894 { 4895 *val = DROP_ALL; 4896 4897 return 0; 4898 } 4899 4900
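/*
 * Editor's note (illustrative; the debugfs path and minor number are
 * setup-specific assumptions): the cache-dropping knob implemented by
 * i915_drop_caches_set() below is meant for test harnesses that need a
 * clean slate, e.g.:
 *
 *	echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *
 * 0xf is DROP_ALL, i.e. DROP_UNBOUND | DROP_BOUND | DROP_RETIRE |
 * DROP_ACTIVE; i915_drop_caches_get() above reports the same mask, so a
 * harness can discover which flags this kernel supports before writing.
 */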
static int 4901 i915_drop_caches_set(void *data, u64 val) 4902 { 4903 struct drm_device *dev = data; 4904 struct drm_i915_private *dev_priv = dev->dev_private; 4905 int ret; 4906 4907 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 4908 4909 /* No need to check and wait for gpu resets, only libdrm auto-restarts 4910 * on ioctls on -EAGAIN. */ 4911 ret = mutex_lock_interruptible(&dev->struct_mutex); 4912 if (ret) 4913 return ret; 4914 4915 if (val & DROP_ACTIVE) { 4916 ret = i915_gpu_idle(dev); 4917 if (ret) 4918 goto unlock; 4919 } 4920 4921 if (val & (DROP_RETIRE | DROP_ACTIVE)) 4922 i915_gem_retire_requests(dev); 4923 4924 if (val & DROP_BOUND) 4925 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); 4926 4927 if (val & DROP_UNBOUND) 4928 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND); 4929 4930 unlock: 4931 mutex_unlock(&dev->struct_mutex); 4932 4933 return ret; 4934 } 4935 4936 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 4937 i915_drop_caches_get, i915_drop_caches_set, 4938 "0x%08llx\n"); 4939 4940 static int 4941 i915_max_freq_get(void *data, u64 *val) 4942 { 4943 struct drm_device *dev = data; 4944 struct drm_i915_private *dev_priv = dev->dev_private; 4945 int ret; 4946 4947 if (INTEL_INFO(dev)->gen < 6) 4948 return -ENODEV; 4949 4950 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4951 4952 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4953 if (ret) 4954 return ret; 4955 4956 *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); 4957 mutex_unlock(&dev_priv->rps.hw_lock); 4958 4959 return 0; 4960 } 4961 4962 static int 4963 i915_max_freq_set(void *data, u64 val) 4964 { 4965 struct drm_device *dev = data; 4966 struct drm_i915_private *dev_priv = dev->dev_private; 4967 u32 hw_max, hw_min; 4968 int ret; 4969 4970 if (INTEL_INFO(dev)->gen < 6) 4971 return -ENODEV; 4972 4973 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4974 4975 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 4976 4977 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4978 if (ret) 4979 return ret; 4980 4981 /* 4982 * Turbo will still be enabled, but won't go above the set value. 
4983 */ 4984 val = intel_freq_opcode(dev_priv, val); 4985 4986 hw_max = dev_priv->rps.max_freq; 4987 hw_min = dev_priv->rps.min_freq; 4988 4989 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { 4990 mutex_unlock(&dev_priv->rps.hw_lock); 4991 return -EINVAL; 4992 } 4993 4994 dev_priv->rps.max_freq_softlimit = val; 4995 4996 intel_set_rps(dev, val); 4997 4998 mutex_unlock(&dev_priv->rps.hw_lock); 4999 5000 return 0; 5001 } 5002 5003 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 5004 i915_max_freq_get, i915_max_freq_set, 5005 "%llu\n"); 5006 5007 static int 5008 i915_min_freq_get(void *data, u64 *val) 5009 { 5010 struct drm_device *dev = data; 5011 struct drm_i915_private *dev_priv = dev->dev_private; 5012 int ret; 5013 5014 if (INTEL_INFO(dev)->gen < 6) 5015 return -ENODEV; 5016 5017 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 5018 5019 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 5020 if (ret) 5021 return ret; 5022 5023 *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); 5024 mutex_unlock(&dev_priv->rps.hw_lock); 5025 5026 return 0; 5027 } 5028 5029 static int 5030 i915_min_freq_set(void *data, u64 val) 5031 { 5032 struct drm_device *dev = data; 5033 struct drm_i915_private *dev_priv = dev->dev_private; 5034 u32 hw_max, hw_min; 5035 int ret; 5036 5037 if (INTEL_INFO(dev)->gen < 6) 5038 return -ENODEV; 5039 5040 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 5041 5042 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 5043 5044 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 5045 if (ret) 5046 return ret; 5047 5048 /* 5049 * Turbo will still be enabled, but won't go below the set value. 5050 */ 5051 val = intel_freq_opcode(dev_priv, val); 5052 5053 hw_max = dev_priv->rps.max_freq; 5054 hw_min = dev_priv->rps.min_freq; 5055 5056 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { 5057 mutex_unlock(&dev_priv->rps.hw_lock); 5058 return -EINVAL; 5059 } 5060 5061 dev_priv->rps.min_freq_softlimit = val; 5062 5063 intel_set_rps(dev, val); 5064 5065 mutex_unlock(&dev_priv->rps.hw_lock); 5066 5067 return 0; 5068 } 5069 5070 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 5071 i915_min_freq_get, i915_min_freq_set, 5072 "%llu\n"); 5073 5074 static int 5075 i915_cache_sharing_get(void *data, u64 *val) 5076 { 5077 struct drm_device *dev = data; 5078 struct drm_i915_private *dev_priv = dev->dev_private; 5079 u32 snpcr; 5080 int ret; 5081 5082 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 5083 return -ENODEV; 5084 5085 ret = mutex_lock_interruptible(&dev->struct_mutex); 5086 if (ret) 5087 return ret; 5088 intel_runtime_pm_get(dev_priv); 5089 5090 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 5091 5092 intel_runtime_pm_put(dev_priv); 5093 mutex_unlock(&dev_priv->dev->struct_mutex); 5094 5095 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 5096 5097 return 0; 5098 } 5099 5100 static int 5101 i915_cache_sharing_set(void *data, u64 val) 5102 { 5103 struct drm_device *dev = data; 5104 struct drm_i915_private *dev_priv = dev->dev_private; 5105 u32 snpcr; 5106 5107 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 5108 return -ENODEV; 5109 5110 if (val > 3) 5111 return -EINVAL; 5112 5113 intel_runtime_pm_get(dev_priv); 5114 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 5115 5116 /* Update the cache sharing policy here as well */ 5117 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 5118 snpcr &= ~GEN6_MBC_SNPCR_MASK; 5119 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 5120 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 
5121 5122 intel_runtime_pm_put(dev_priv); 5123 return 0; 5124 } 5125 5126 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 5127 i915_cache_sharing_get, i915_cache_sharing_set, 5128 "%llu\n"); 5129 5130 struct sseu_dev_status { 5131 unsigned int slice_total; 5132 unsigned int subslice_total; 5133 unsigned int subslice_per_slice; 5134 unsigned int eu_total; 5135 unsigned int eu_per_subslice; 5136 }; 5137 5138 static void cherryview_sseu_device_status(struct drm_device *dev, 5139 struct sseu_dev_status *stat) 5140 { 5141 struct drm_i915_private *dev_priv = dev->dev_private; 5142 int ss_max = 2; 5143 int ss; 5144 u32 sig1[ss_max], sig2[ss_max]; 5145 5146 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); 5147 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); 5148 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); 5149 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); 5150 5151 for (ss = 0; ss < ss_max; ss++) { 5152 unsigned int eu_cnt; 5153 5154 if (sig1[ss] & CHV_SS_PG_ENABLE) 5155 /* skip disabled subslice */ 5156 continue; 5157 5158 stat->slice_total = 1; 5159 stat->subslice_per_slice++; 5160 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + 5161 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + 5162 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + 5163 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2); 5164 stat->eu_total += eu_cnt; 5165 stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt); 5166 } 5167 stat->subslice_total = stat->subslice_per_slice; 5168 } 5169 5170 static void gen9_sseu_device_status(struct drm_device *dev, 5171 struct sseu_dev_status *stat) 5172 { 5173 struct drm_i915_private *dev_priv = dev->dev_private; 5174 int s_max = 3, ss_max = 4; 5175 int s, ss; 5176 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; 5177 5178 /* BXT has a single slice and at most 3 subslices. */ 5179 if (IS_BROXTON(dev)) { 5180 s_max = 1; 5181 ss_max = 3; 5182 } 5183 5184 for (s = 0; s < s_max; s++) { 5185 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s)); 5186 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s)); 5187 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s)); 5188 } 5189 5190 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | 5191 GEN9_PGCTL_SSA_EU19_ACK | 5192 GEN9_PGCTL_SSA_EU210_ACK | 5193 GEN9_PGCTL_SSA_EU311_ACK; 5194 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | 5195 GEN9_PGCTL_SSB_EU19_ACK | 5196 GEN9_PGCTL_SSB_EU210_ACK | 5197 GEN9_PGCTL_SSB_EU311_ACK; 5198 5199 for (s = 0; s < s_max; s++) { 5200 unsigned int ss_cnt = 0; 5201 5202 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) 5203 /* skip disabled slice */ 5204 continue; 5205 5206 stat->slice_total++; 5207 5208 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 5209 ss_cnt = INTEL_INFO(dev)->subslice_per_slice; 5210 5211 for (ss = 0; ss < ss_max; ss++) { 5212 unsigned int eu_cnt; 5213 5214 if (IS_BROXTON(dev) && 5215 !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) 5216 /* skip disabled subslice */ 5217 continue; 5218 5219 if (IS_BROXTON(dev)) 5220 ss_cnt++; 5221 5222 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & 5223 eu_mask[ss%2]); 5224 stat->eu_total += eu_cnt; 5225 stat->eu_per_subslice = max(stat->eu_per_subslice, 5226 eu_cnt); 5227 } 5228 5229 stat->subslice_total += ss_cnt; 5230 stat->subslice_per_slice = max(stat->subslice_per_slice, 5231 ss_cnt); 5232 } 5233 } 5234 5235 static void broadwell_sseu_device_status(struct drm_device *dev, 5236 struct sseu_dev_status *stat) 5237 { 5238 struct drm_i915_private *dev_priv = dev->dev_private; 5239 int s; 5240 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); 5241 5242 stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK); 5243 5244 if (stat->slice_total) { 5245 
stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice; 5246 stat->subslice_total = stat->slice_total * 5247 stat->subslice_per_slice; 5248 stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice; 5249 stat->eu_total = stat->eu_per_subslice * stat->subslice_total; 5250 5251 /* subtract fused off EU(s) from enabled slice(s) */ 5252 for (s = 0; s < stat->slice_total; s++) { 5253 u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s]; 5254 5255 stat->eu_total -= hweight8(subslice_7eu); 5256 } 5257 } 5258 } 5259 5260 static int i915_sseu_status(struct seq_file *m, void *unused) 5261 { 5262 struct drm_info_node *node = (struct drm_info_node *) m->private; 5263 struct drm_device *dev = node->minor->dev; 5264 struct sseu_dev_status stat; 5265 5266 if (INTEL_INFO(dev)->gen < 8) 5267 return -ENODEV; 5268 5269 seq_puts(m, "SSEU Device Info\n"); 5270 seq_printf(m, " Available Slice Total: %u\n", 5271 INTEL_INFO(dev)->slice_total); 5272 seq_printf(m, " Available Subslice Total: %u\n", 5273 INTEL_INFO(dev)->subslice_total); 5274 seq_printf(m, " Available Subslice Per Slice: %u\n", 5275 INTEL_INFO(dev)->subslice_per_slice); 5276 seq_printf(m, " Available EU Total: %u\n", 5277 INTEL_INFO(dev)->eu_total); 5278 seq_printf(m, " Available EU Per Subslice: %u\n", 5279 INTEL_INFO(dev)->eu_per_subslice); 5280 seq_printf(m, " Has Slice Power Gating: %s\n", 5281 yesno(INTEL_INFO(dev)->has_slice_pg)); 5282 seq_printf(m, " Has Subslice Power Gating: %s\n", 5283 yesno(INTEL_INFO(dev)->has_subslice_pg)); 5284 seq_printf(m, " Has EU Power Gating: %s\n", 5285 yesno(INTEL_INFO(dev)->has_eu_pg)); 5286 5287 seq_puts(m, "SSEU Device Status\n"); 5288 memset(&stat, 0, sizeof(stat)); 5289 if (IS_CHERRYVIEW(dev)) { 5290 cherryview_sseu_device_status(dev, &stat); 5291 } else if (IS_BROADWELL(dev)) { 5292 broadwell_sseu_device_status(dev, &stat); 5293 } else if (INTEL_INFO(dev)->gen >= 9) { 5294 gen9_sseu_device_status(dev, &stat); 5295 } 5296 seq_printf(m, " Enabled Slice Total: %u\n", 5297 stat.slice_total); 5298 seq_printf(m, " Enabled Subslice Total: %u\n", 5299 stat.subslice_total); 5300 seq_printf(m, " Enabled Subslice Per Slice: %u\n", 5301 stat.subslice_per_slice); 5302 seq_printf(m, " Enabled EU Total: %u\n", 5303 stat.eu_total); 5304 seq_printf(m, " Enabled EU Per Subslice: %u\n", 5305 stat.eu_per_subslice); 5306 5307 return 0; 5308 } 5309 5310 static int i915_forcewake_open(struct inode *inode, struct file *file) 5311 { 5312 struct drm_device *dev = inode->i_private; 5313 struct drm_i915_private *dev_priv = dev->dev_private; 5314 5315 if (INTEL_INFO(dev)->gen < 6) 5316 return 0; 5317 5318 intel_runtime_pm_get(dev_priv); 5319 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5320 5321 return 0; 5322 } 5323 5324 static int i915_forcewake_release(struct inode *inode, struct file *file) 5325 { 5326 struct drm_device *dev = inode->i_private; 5327 struct drm_i915_private *dev_priv = dev->dev_private; 5328 5329 if (INTEL_INFO(dev)->gen < 6) 5330 return 0; 5331 5332 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5333 intel_runtime_pm_put(dev_priv); 5334 5335 return 0; 5336 } 5337 5338 static const struct file_operations i915_forcewake_fops = { 5339 .owner = THIS_MODULE, 5340 .open = i915_forcewake_open, 5341 .release = i915_forcewake_release, 5342 }; 5343 5344 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 5345 { 5346 struct drm_device *dev = minor->dev; 5347 struct dentry *ent; 5348 5349 ent = debugfs_create_file("i915_forcewake_user", 5350 S_IRUSR, 5351 root, dev, 5352 
&i915_forcewake_fops); 5353 if (!ent) 5354 return -ENOMEM; 5355 5356 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 5357 } 5358 5359 static int i915_debugfs_create(struct dentry *root, 5360 struct drm_minor *minor, 5361 const char *name, 5362 const struct file_operations *fops) 5363 { 5364 struct drm_device *dev = minor->dev; 5365 struct dentry *ent; 5366 5367 ent = debugfs_create_file(name, 5368 S_IRUGO | S_IWUSR, 5369 root, dev, 5370 fops); 5371 if (!ent) 5372 return -ENOMEM; 5373 5374 return drm_add_fake_info_node(minor, ent, fops); 5375 } 5376 5377 static const struct drm_info_list i915_debugfs_list[] = { 5378 {"i915_capabilities", i915_capabilities, 0}, 5379 {"i915_gem_objects", i915_gem_object_info, 0}, 5380 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 5381 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 5382 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 5383 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 5384 {"i915_gem_stolen", i915_gem_stolen_list_info }, 5385 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 5386 {"i915_gem_request", i915_gem_request_info, 0}, 5387 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 5388 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 5389 {"i915_gem_interrupt", i915_interrupt_info, 0}, 5390 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 5391 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 5392 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 5393 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, 5394 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, 5395 {"i915_guc_info", i915_guc_info, 0}, 5396 {"i915_guc_load_status", i915_guc_load_status_info, 0}, 5397 {"i915_guc_log_dump", i915_guc_log_dump, 0}, 5398 {"i915_frequency_info", i915_frequency_info, 0}, 5399 {"i915_hangcheck_info", i915_hangcheck_info, 0}, 5400 {"i915_drpc_info", i915_drpc_info, 0}, 5401 {"i915_emon_status", i915_emon_status, 0}, 5402 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 5403 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 5404 {"i915_fbc_status", i915_fbc_status, 0}, 5405 {"i915_ips_status", i915_ips_status, 0}, 5406 {"i915_sr_status", i915_sr_status, 0}, 5407 {"i915_opregion", i915_opregion, 0}, 5408 {"i915_vbt", i915_vbt, 0}, 5409 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 5410 {"i915_context_status", i915_context_status, 0}, 5411 {"i915_dump_lrc", i915_dump_lrc, 0}, 5412 {"i915_execlists", i915_execlists, 0}, 5413 {"i915_forcewake_domains", i915_forcewake_domains, 0}, 5414 {"i915_swizzle_info", i915_swizzle_info, 0}, 5415 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 5416 {"i915_llc", i915_llc, 0}, 5417 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 5418 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 5419 {"i915_energy_uJ", i915_energy_uJ, 0}, 5420 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 5421 {"i915_power_domain_info", i915_power_domain_info, 0}, 5422 {"i915_dmc_info", i915_dmc_info, 0}, 5423 {"i915_display_info", i915_display_info, 0}, 5424 {"i915_semaphore_status", i915_semaphore_status, 0}, 5425 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 5426 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 5427 {"i915_wa_registers", i915_wa_registers, 0}, 5428 {"i915_ddb_info", i915_ddb_info, 0}, 5429 {"i915_sseu_status", i915_sseu_status, 0}, 5430 {"i915_drrs_status", i915_drrs_status, 0}, 5431 {"i915_rps_boost_info", i915_rps_boost_info, 0}, 5432 }; 5433 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 5434 5435 static 
const struct i915_debugfs_files { 5436 const char *name; 5437 const struct file_operations *fops; 5438 } i915_debugfs_files[] = { 5439 {"i915_wedged", &i915_wedged_fops}, 5440 {"i915_max_freq", &i915_max_freq_fops}, 5441 {"i915_min_freq", &i915_min_freq_fops}, 5442 {"i915_cache_sharing", &i915_cache_sharing_fops}, 5443 {"i915_ring_stop", &i915_ring_stop_fops}, 5444 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 5445 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 5446 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 5447 {"i915_error_state", &i915_error_state_fops}, 5448 {"i915_next_seqno", &i915_next_seqno_fops}, 5449 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 5450 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 5451 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 5452 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 5453 {"i915_fbc_false_color", &i915_fbc_fc_fops}, 5454 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 5455 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 5456 {"i915_dp_test_active", &i915_displayport_test_active_fops} 5457 }; 5458 5459 void intel_display_crc_init(struct drm_device *dev) 5460 { 5461 struct drm_i915_private *dev_priv = dev->dev_private; 5462 enum pipe pipe; 5463 5464 for_each_pipe(dev_priv, pipe) { 5465 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 5466 5467 pipe_crc->opened = false; 5468 spin_lock_init(&pipe_crc->lock); 5469 init_waitqueue_head(&pipe_crc->wq); 5470 } 5471 } 5472 5473 int i915_debugfs_init(struct drm_minor *minor) 5474 { 5475 int ret, i; 5476 5477 ret = i915_forcewake_create(minor->debugfs_root, minor); 5478 if (ret) 5479 return ret; 5480 5481 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 5482 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); 5483 if (ret) 5484 return ret; 5485 } 5486 5487 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 5488 ret = i915_debugfs_create(minor->debugfs_root, minor, 5489 i915_debugfs_files[i].name, 5490 i915_debugfs_files[i].fops); 5491 if (ret) 5492 return ret; 5493 } 5494 5495 return drm_debugfs_create_files(i915_debugfs_list, 5496 I915_DEBUGFS_ENTRIES, 5497 minor->debugfs_root, minor); 5498 } 5499 5500 void i915_debugfs_cleanup(struct drm_minor *minor) 5501 { 5502 int i; 5503 5504 drm_debugfs_remove_files(i915_debugfs_list, 5505 I915_DEBUGFS_ENTRIES, minor); 5506 5507 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 5508 1, minor); 5509 5510 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 5511 struct drm_info_list *info_list = 5512 (struct drm_info_list *)&i915_pipe_crc_data[i]; 5513 5514 drm_debugfs_remove_files(info_list, 1, minor); 5515 } 5516 5517 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 5518 struct drm_info_list *info_list = 5519 (struct drm_info_list *) i915_debugfs_files[i].fops; 5520 5521 drm_debugfs_remove_files(info_list, 1, minor); 5522 } 5523 } 5524 5525 struct dpcd_block { 5526 /* DPCD dump start address. */ 5527 unsigned int offset; 5528 /* DPCD dump end address, inclusive. If unset, .size will be used. */ 5529 unsigned int end; 5530 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ 5531 size_t size; 5532 /* Only valid for eDP. 
*/ 5533 bool edp; 5534 }; 5535 5536 static const struct dpcd_block i915_dpcd_debug[] = { 5537 { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, 5538 { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, 5539 { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, 5540 { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, 5541 { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, 5542 { .offset = DP_SET_POWER }, 5543 { .offset = DP_EDP_DPCD_REV }, 5544 { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, 5545 { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, 5546 { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, 5547 }; 5548 5549 static int i915_dpcd_show(struct seq_file *m, void *data) 5550 { 5551 struct drm_connector *connector = m->private; 5552 struct intel_dp *intel_dp = 5553 enc_to_intel_dp(&intel_attached_encoder(connector)->base); 5554 uint8_t buf[16]; 5555 ssize_t err; 5556 int i; 5557 5558 if (connector->status != connector_status_connected) 5559 return -ENODEV; 5560 5561 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) { 5562 const struct dpcd_block *b = &i915_dpcd_debug[i]; 5563 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1); 5564 5565 if (b->edp && 5566 connector->connector_type != DRM_MODE_CONNECTOR_eDP) 5567 continue; 5568 5569 /* low tech for now */ 5570 if (WARN_ON(size > sizeof(buf))) 5571 continue; 5572 5573 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size); 5574 if (err <= 0) { 5575 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", 5576 size, b->offset, err); 5577 continue; 5578 } 5579 5580 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); 5581 } 5582 5583 return 0; 5584 } 5585 5586 static int i915_dpcd_open(struct inode *inode, struct file *file) 5587 { 5588 return single_open(file, i915_dpcd_show, inode->i_private); 5589 } 5590 5591 static const struct file_operations i915_dpcd_fops = { 5592 .owner = THIS_MODULE, 5593 .open = i915_dpcd_open, 5594 .read = seq_read, 5595 .llseek = seq_lseek, 5596 .release = single_release, 5597 }; 5598 5599 /** 5600 * i915_debugfs_connector_add - add i915 specific connector debugfs files 5601 * @connector: pointer to a registered drm_connector 5602 * 5603 * Cleanup will be done by drm_connector_unregister() through a call to 5604 * drm_debugfs_connector_remove(). 5605 * 5606 * Returns 0 on success, negative error codes on error. 5607 */ 5608 int i915_debugfs_connector_add(struct drm_connector *connector) 5609 { 5610 struct dentry *root = connector->debugfs_entry; 5611 5612 /* The connector must have been registered beforehand. */ 5613 if (!root) 5614 return -ENODEV; 5615 5616 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || 5617 connector->connector_type == DRM_MODE_CONNECTOR_eDP) 5618 debugfs_create_file("i915_dpcd", S_IRUGO, root, connector, 5619 &i915_dpcd_fops); 5620 5621 return 0; 5622 } 5623
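/*
 * Editor's note: an illustrative caller sketch for the helper above. The
 * assumption (not shown in this file) is that "connector" has already
 * gone through drm_connector_register(), which is what populates
 * connector->debugfs_entry; error handling is elided:
 *
 *	int ret = drm_connector_register(connector);
 *	if (ret == 0)
 *		ret = i915_debugfs_connector_add(connector);
 *
 * The i915_dpcd file is removed together with the rest of the connector
 * directory by drm_connector_unregister() via
 * drm_debugfs_connector_remove(), as the kerneldoc above notes.
 */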