// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string_helpers.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "i915_debugfs.h"
#include "intel_de.h"
#include "intel_display_debugfs.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_drrs.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_panel.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   str_yes_no(dev_priv->params.enable_ips));

	if (DISPLAY_VER(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (DISPLAY_VER(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM_LP_ENABLE;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", str_enabled_disabled(sr_enabled));

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}

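/*
 * Describe the framebuffers currently known to the driver: the fbdev
 * framebuffer (when CONFIG_DRM_FBDEV_EMULATION is enabled) followed by
 * every user-created framebuffer on the device, one line each.
 */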
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	fbdev_fb = intel_fbdev_framebuffer(dev_priv->fbdev);
	if (fbdev_fb) {
		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		intel_attached_dp(to_intel_connector(connector));
	int ret;

	if (!CAN_PSR(intel_dp)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->status != connector_status_connected)
		return -ENODEV;

	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		return ret;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);

static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const char *status = "unknown";
	u32 val, status_val;

	if (intel_dp->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = intel_de_read(dev_priv,
				    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = intel_de_read(dev_priv,
				    EDP_PSR_STATUS(intel_dp->psr.transcoder));
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			     EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}

static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
	if (psr->sink_support)
		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	if (!psr->sink_support)
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   str_yes_no(psr->sink_not_reliable));

		goto unlock;
	}

	if (psr->psr2_enabled) {
		val = intel_de_read(dev_priv,
				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   str_enabled_disabled(enabled), val);
	psr_source_status(intel_dp, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 every time DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = intel_de_read(dev_priv,
				    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
		val &= EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Read all 3 registers beforehand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
			val = intel_de_read(dev_priv,
					    PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
			su_frames_val[frame / 3] = val;
		}

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}

		seq_printf(m, "PSR2 selective fetch: %s\n",
			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_dp *intel_dp = NULL;
	struct intel_encoder *encoder;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	/* Find the first EDP which supports PSR */
	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		intel_dp = enc_to_intel_dp(encoder);
		break;
	}

	if (!intel_dp)
		return -ENODEV;

	return intel_psr_status(m, intel_dp);
}

static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_encoder *encoder;
	intel_wakeref_t wakeref;
	int ret = -ENODEV;

	if (!HAS_PSR(dev_priv))
		return ret;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);

		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

		// TODO: split to each transcoder's PSR debug state
		ret = intel_psr_debug_set(intel_dp, val);

		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	}

	return ret;
}

static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_encoder *encoder;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		// TODO: split to each transcoder's PSR debug state
		*val = READ_ONCE(intel_dp->psr.debug);
		return 0;
	}

	return -ENODEV;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");

static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	intel_display_power_debug(i915, m);

	return 0;
}

static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 const struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}

static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *crtc,
			       struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
		   encoder->base.base.id, encoder->base.name);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		const struct drm_connector_state *conn_state =
			connector->state;

		if (conn_state->best_encoder != &encoder->base)
			continue;

		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
			   connector->base.id, connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);
}

static void intel_panel_info(struct seq_file *m,
			     struct intel_connector *connector)
{
	const struct drm_display_mode *fixed_mode;

	if (list_empty(&connector->panel.fixed_modes))
		return;

	seq_puts(m, "\tfixed modes:\n");

	list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head)
		intel_seq_print_mode(m, 2, fixed_mode);
}

static void intel_hdcp_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	bool hdcp_cap, hdcp2_cap;

	if (!intel_connector->hdcp.shim) {
		seq_puts(m, "No Connector Support");
		goto out;
	}

	hdcp_cap = intel_hdcp_capable(intel_connector);
	hdcp2_cap = intel_hdcp2_capable(intel_connector);

	if (hdcp_cap)
		seq_puts(m, "HDCP1.4 ");
	if (hdcp2_cap)
		seq_puts(m, "HDCP2.2 ");
"HDCP2.2 "); 506 507 if (!hdcp_cap && !hdcp2_cap) 508 seq_puts(m, "None"); 509 510 out: 511 seq_puts(m, "\n"); 512 } 513 514 static void intel_dp_info(struct seq_file *m, 515 struct intel_connector *intel_connector) 516 { 517 struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); 518 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 519 const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr; 520 521 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 522 seq_printf(m, "\taudio support: %s\n", 523 str_yes_no(intel_dp->has_audio)); 524 525 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, 526 edid ? edid->data : NULL, &intel_dp->aux); 527 } 528 529 static void intel_dp_mst_info(struct seq_file *m, 530 struct intel_connector *intel_connector) 531 { 532 bool has_audio = intel_connector->port->has_audio; 533 534 seq_printf(m, "\taudio support: %s\n", str_yes_no(has_audio)); 535 } 536 537 static void intel_hdmi_info(struct seq_file *m, 538 struct intel_connector *intel_connector) 539 { 540 struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); 541 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder); 542 543 seq_printf(m, "\taudio support: %s\n", 544 str_yes_no(intel_hdmi->has_audio)); 545 } 546 547 static void intel_connector_info(struct seq_file *m, 548 struct drm_connector *connector) 549 { 550 struct intel_connector *intel_connector = to_intel_connector(connector); 551 const struct drm_connector_state *conn_state = connector->state; 552 struct intel_encoder *encoder = 553 to_intel_encoder(conn_state->best_encoder); 554 const struct drm_display_mode *mode; 555 556 seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n", 557 connector->base.id, connector->name, 558 drm_get_connector_status_name(connector->status)); 559 560 if (connector->status == connector_status_disconnected) 561 return; 562 563 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 564 connector->display_info.width_mm, 565 connector->display_info.height_mm); 566 seq_printf(m, "\tsubpixel order: %s\n", 567 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 568 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); 569 570 if (!encoder) 571 return; 572 573 switch (connector->connector_type) { 574 case DRM_MODE_CONNECTOR_DisplayPort: 575 case DRM_MODE_CONNECTOR_eDP: 576 if (encoder->type == INTEL_OUTPUT_DP_MST) 577 intel_dp_mst_info(m, intel_connector); 578 else 579 intel_dp_info(m, intel_connector); 580 break; 581 case DRM_MODE_CONNECTOR_HDMIA: 582 if (encoder->type == INTEL_OUTPUT_HDMI || 583 encoder->type == INTEL_OUTPUT_DDI) 584 intel_hdmi_info(m, intel_connector); 585 break; 586 default: 587 break; 588 } 589 590 seq_puts(m, "\tHDCP version: "); 591 intel_hdcp_info(m, intel_connector); 592 593 intel_panel_info(m, intel_connector); 594 595 seq_printf(m, "\tmodes:\n"); 596 list_for_each_entry(mode, &connector->modes, head) 597 intel_seq_print_mode(m, 2, mode); 598 } 599 600 static const char *plane_type(enum drm_plane_type type) 601 { 602 switch (type) { 603 case DRM_PLANE_TYPE_OVERLAY: 604 return "OVL"; 605 case DRM_PLANE_TYPE_PRIMARY: 606 return "PRI"; 607 case DRM_PLANE_TYPE_CURSOR: 608 return "CUR"; 609 /* 610 * Deliberately omitting default: to generate compiler warnings 611 * when a new drm_plane_type gets added. 
	 */
	}

	return "unknown";
}

static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to the docs only one DRM_MODE_ROTATE_ is allowed, but
	 * print them all so that misused values are easy to spot.
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}

static const char *plane_visibility(const struct intel_plane_state *plane_state)
{
	if (plane_state->uapi.visible)
		return "visible";

	if (plane_state->planar_slave)
		return "planar-slave";

	return "hidden";
}

static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
{
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	const struct drm_framebuffer *fb = plane_state->uapi.fb;
	struct drm_rect src, dst;
	char rot_str[48];

	src = drm_plane_state_src(&plane_state->uapi);
	dst = drm_plane_state_dest(&plane_state->uapi);

	plane_rotation(rot_str, sizeof(rot_str),
		       plane_state->uapi.rotation);

	seq_puts(m, "\t\tuapi: [FB:");
	if (fb)
		seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
			   &fb->format->format, fb->modifier, fb->width,
			   fb->height);
	else
		seq_puts(m, "0] n/a,0x0,0x0,");
	seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
		   ", rotation=%s\n", plane_visibility(plane_state),
		   DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);

	if (plane_state->planar_linked_plane)
		seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
			   plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
			   plane_state->planar_slave ? "slave" : "master");
"slave" : "master"); 675 } 676 677 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane) 678 { 679 const struct intel_plane_state *plane_state = 680 to_intel_plane_state(plane->base.state); 681 const struct drm_framebuffer *fb = plane_state->hw.fb; 682 char rot_str[48]; 683 684 if (!fb) 685 return; 686 687 plane_rotation(rot_str, sizeof(rot_str), 688 plane_state->hw.rotation); 689 690 seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src=" 691 DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", 692 fb->base.id, &fb->format->format, 693 fb->modifier, fb->width, fb->height, 694 str_yes_no(plane_state->uapi.visible), 695 DRM_RECT_FP_ARG(&plane_state->uapi.src), 696 DRM_RECT_ARG(&plane_state->uapi.dst), 697 rot_str); 698 } 699 700 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc) 701 { 702 struct drm_i915_private *dev_priv = node_to_i915(m->private); 703 struct intel_plane *plane; 704 705 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 706 seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n", 707 plane->base.base.id, plane->base.name, 708 plane_type(plane->base.type)); 709 intel_plane_uapi_info(m, plane); 710 intel_plane_hw_info(m, plane); 711 } 712 } 713 714 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) 715 { 716 const struct intel_crtc_state *crtc_state = 717 to_intel_crtc_state(crtc->base.state); 718 int num_scalers = crtc->num_scalers; 719 int i; 720 721 /* Not all platformas have a scaler */ 722 if (num_scalers) { 723 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 724 num_scalers, 725 crtc_state->scaler_state.scaler_users, 726 crtc_state->scaler_state.scaler_id); 727 728 for (i = 0; i < num_scalers; i++) { 729 const struct intel_scaler *sc = 730 &crtc_state->scaler_state.scalers[i]; 731 732 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 733 i, str_yes_no(sc->in_use), sc->mode); 734 } 735 seq_puts(m, "\n"); 736 } else { 737 seq_puts(m, "\tNo scalers available on this platform\n"); 738 } 739 } 740 741 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) 742 static void crtc_updates_info(struct seq_file *m, 743 struct intel_crtc *crtc, 744 const char *hdr) 745 { 746 u64 count; 747 int row; 748 749 count = 0; 750 for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) 751 count += crtc->debug.vbl.times[row]; 752 seq_printf(m, "%sUpdates: %llu\n", hdr, count); 753 if (!count) 754 return; 755 756 for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) { 757 char columns[80] = " |"; 758 unsigned int x; 759 760 if (row & 1) { 761 const char *units; 762 763 if (row > 10) { 764 x = 1000000; 765 units = "ms"; 766 } else { 767 x = 1000; 768 units = "us"; 769 } 770 771 snprintf(columns, sizeof(columns), "%4ld%s |", 772 DIV_ROUND_CLOSEST(BIT(row + 9), x), units); 773 } 774 775 if (crtc->debug.vbl.times[row]) { 776 x = ilog2(crtc->debug.vbl.times[row]); 777 memset(columns + 8, '*', x); 778 columns[8 + x] = '\0'; 779 } 780 781 seq_printf(m, "%s%s\n", hdr, columns); 782 } 783 784 seq_printf(m, "%sMin update: %lluns\n", 785 hdr, crtc->debug.vbl.min); 786 seq_printf(m, "%sMax update: %lluns\n", 787 hdr, crtc->debug.vbl.max); 788 seq_printf(m, "%sAverage update: %lluns\n", 789 hdr, div64_u64(crtc->debug.vbl.sum, count)); 790 seq_printf(m, "%sOverruns > %uus: %u\n", 791 hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over); 792 } 793 794 static int crtc_updates_show(struct seq_file *m, void *data) 795 { 796 crtc_updates_info(m, m->private, ""); 797 return 0; 798 } 799 800 static int 
{
	return single_open(file, crtc_updates_show, inode->i_private);
}

static ssize_t crtc_updates_write(struct file *file,
				  const char __user *ubuf,
				  size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_crtc *crtc = m->private;

	/* May race with an update. Meh. */
	memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));

	return len;
}

static const struct file_operations crtc_updates_fops = {
	.owner = THIS_MODULE,
	.open = crtc_updates_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = crtc_updates_write
};

static void crtc_updates_add(struct drm_crtc *crtc)
{
	debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
			    to_intel_crtc(crtc), &crtc_updates_fops);
}

#else
static void crtc_updates_info(struct seq_file *m,
			      struct intel_crtc *crtc,
			      const char *hdr)
{
}

static void crtc_updates_add(struct drm_crtc *crtc)
{
}
#endif

static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_encoder *encoder;

	seq_printf(m, "[CRTC:%d:%s]:\n",
		   crtc->base.base.id, crtc->base.name);

	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
		   str_yes_no(crtc_state->uapi.enable),
		   str_yes_no(crtc_state->uapi.active),
		   DRM_MODE_ARG(&crtc_state->uapi.mode));

	seq_printf(m, "\thw: enable=%s, active=%s\n",
		   str_yes_no(crtc_state->hw.enable), str_yes_no(crtc_state->hw.active));
	seq_printf(m, "\tadjusted_mode=" DRM_MODE_FMT "\n",
		   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
	seq_printf(m, "\tpipe_mode=" DRM_MODE_FMT "\n",
		   DRM_MODE_ARG(&crtc_state->hw.pipe_mode));

	seq_printf(m, "\tpipe src=" DRM_RECT_FMT ", dither=%s, bpp=%d\n",
		   DRM_RECT_ARG(&crtc_state->pipe_src),
		   str_yes_no(crtc_state->dither), crtc_state->pipe_bpp);

	intel_scaler_info(m, crtc);

	if (crtc_state->bigjoiner_pipes)
		seq_printf(m, "\tLinked to 0x%x pipes as a %s\n",
			   crtc_state->bigjoiner_pipes,
			   intel_crtc_is_bigjoiner_slave(crtc_state) ? "slave" : "master");
"slave" : "master"); 877 878 for_each_intel_encoder_mask(&dev_priv->drm, encoder, 879 crtc_state->uapi.encoder_mask) 880 intel_encoder_info(m, crtc, encoder); 881 882 intel_plane_info(m, crtc); 883 884 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n", 885 str_yes_no(!crtc->cpu_fifo_underrun_disabled), 886 str_yes_no(!crtc->pch_fifo_underrun_disabled)); 887 888 crtc_updates_info(m, crtc, "\t"); 889 } 890 891 static int i915_display_info(struct seq_file *m, void *unused) 892 { 893 struct drm_i915_private *dev_priv = node_to_i915(m->private); 894 struct drm_device *dev = &dev_priv->drm; 895 struct intel_crtc *crtc; 896 struct drm_connector *connector; 897 struct drm_connector_list_iter conn_iter; 898 intel_wakeref_t wakeref; 899 900 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 901 902 drm_modeset_lock_all(dev); 903 904 seq_printf(m, "CRTC info\n"); 905 seq_printf(m, "---------\n"); 906 for_each_intel_crtc(dev, crtc) 907 intel_crtc_info(m, crtc); 908 909 seq_printf(m, "\n"); 910 seq_printf(m, "Connector info\n"); 911 seq_printf(m, "--------------\n"); 912 drm_connector_list_iter_begin(dev, &conn_iter); 913 drm_for_each_connector_iter(connector, &conn_iter) 914 intel_connector_info(m, connector); 915 drm_connector_list_iter_end(&conn_iter); 916 917 drm_modeset_unlock_all(dev); 918 919 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 920 921 return 0; 922 } 923 924 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 925 { 926 struct drm_i915_private *dev_priv = node_to_i915(m->private); 927 struct drm_device *dev = &dev_priv->drm; 928 int i; 929 930 drm_modeset_lock_all(dev); 931 932 seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n", 933 dev_priv->dpll.ref_clks.nssc, 934 dev_priv->dpll.ref_clks.ssc); 935 936 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { 937 struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i]; 938 939 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name, 940 pll->info->id); 941 seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n", 942 pll->state.pipe_mask, pll->active_mask, 943 str_yes_no(pll->on)); 944 seq_printf(m, " tracked hardware state:\n"); 945 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); 946 seq_printf(m, " dpll_md: 0x%08x\n", 947 pll->state.hw_state.dpll_md); 948 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); 949 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); 950 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); 951 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0); 952 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1); 953 seq_printf(m, " div0: 0x%08x\n", pll->state.hw_state.div0); 954 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n", 955 pll->state.hw_state.mg_refclkin_ctl); 956 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n", 957 pll->state.hw_state.mg_clktop2_coreclkctl1); 958 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n", 959 pll->state.hw_state.mg_clktop2_hsclkctl); 960 seq_printf(m, " mg_pll_div0: 0x%08x\n", 961 pll->state.hw_state.mg_pll_div0); 962 seq_printf(m, " mg_pll_div1: 0x%08x\n", 963 pll->state.hw_state.mg_pll_div1); 964 seq_printf(m, " mg_pll_lf: 0x%08x\n", 965 pll->state.hw_state.mg_pll_lf); 966 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n", 967 pll->state.hw_state.mg_pll_frac_lock); 968 seq_printf(m, " mg_pll_ssc: 0x%08x\n", 969 pll->state.hw_state.mg_pll_ssc); 970 seq_printf(m, " mg_pll_bias: 0x%08x\n", 971 pll->state.hw_state.mg_pll_bias); 972 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n", 973 
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

static int i915_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
		   str_yes_no(dev_priv->ipc_enabled));
	return 0;
}

static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}

static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			drm_info(&dev_priv->drm,
				 "Enabling IPC: WM will be proper only after next commit\n");
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}

static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};

static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	if (DISPLAY_VER(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb[plane_id];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}

static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	struct intel_crtc *crtc;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		seq_printf(m, "[CONNECTOR:%d:%s] DRRS type: %s\n",
			   connector->base.base.id, connector->base.name,
			   intel_drrs_type_str(intel_panel_drrs_type(connector)));
	}
	drm_connector_list_iter_end(&conn_iter);

	seq_puts(m, "\n");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

seq_printf(m, "[CRTC:%d:%s]:\n", 1093 crtc->base.base.id, crtc->base.name); 1094 1095 mutex_lock(&crtc->drrs.mutex); 1096 1097 /* DRRS Supported */ 1098 seq_printf(m, "\tDRRS Enabled: %s\n", 1099 str_yes_no(crtc_state->has_drrs)); 1100 1101 seq_printf(m, "\tDRRS Active: %s\n", 1102 str_yes_no(intel_drrs_is_active(crtc))); 1103 1104 seq_printf(m, "\tBusy_frontbuffer_bits: 0x%X\n", 1105 crtc->drrs.busy_frontbuffer_bits); 1106 1107 seq_printf(m, "\tDRRS refresh rate: %s\n", 1108 crtc->drrs.refresh_rate == DRRS_REFRESH_RATE_LOW ? 1109 "low" : "high"); 1110 1111 mutex_unlock(&crtc->drrs.mutex); 1112 } 1113 1114 return 0; 1115 } 1116 1117 static bool 1118 intel_lpsp_power_well_enabled(struct drm_i915_private *i915, 1119 enum i915_power_well_id power_well_id) 1120 { 1121 intel_wakeref_t wakeref; 1122 bool is_enabled; 1123 1124 wakeref = intel_runtime_pm_get(&i915->runtime_pm); 1125 is_enabled = intel_display_power_well_is_enabled(i915, 1126 power_well_id); 1127 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 1128 1129 return is_enabled; 1130 } 1131 1132 static int i915_lpsp_status(struct seq_file *m, void *unused) 1133 { 1134 struct drm_i915_private *i915 = node_to_i915(m->private); 1135 bool lpsp_enabled = false; 1136 1137 if (DISPLAY_VER(i915) >= 13 || IS_DISPLAY_VER(i915, 9, 10)) { 1138 lpsp_enabled = !intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2); 1139 } else if (IS_DISPLAY_VER(i915, 11, 12)) { 1140 lpsp_enabled = !intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3); 1141 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 1142 lpsp_enabled = !intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL); 1143 } else { 1144 seq_puts(m, "LPSP: not supported\n"); 1145 return 0; 1146 } 1147 1148 seq_printf(m, "LPSP: %s\n", str_enabled_disabled(lpsp_enabled)); 1149 1150 return 0; 1151 } 1152 1153 static int i915_dp_mst_info(struct seq_file *m, void *unused) 1154 { 1155 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1156 struct drm_device *dev = &dev_priv->drm; 1157 struct intel_encoder *intel_encoder; 1158 struct intel_digital_port *dig_port; 1159 struct drm_connector *connector; 1160 struct drm_connector_list_iter conn_iter; 1161 1162 drm_connector_list_iter_begin(dev, &conn_iter); 1163 drm_for_each_connector_iter(connector, &conn_iter) { 1164 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 1165 continue; 1166 1167 intel_encoder = intel_attached_encoder(to_intel_connector(connector)); 1168 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) 1169 continue; 1170 1171 dig_port = enc_to_dig_port(intel_encoder); 1172 if (!intel_dp_mst_source_support(&dig_port->dp)) 1173 continue; 1174 1175 seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n", 1176 dig_port->base.base.base.id, 1177 dig_port->base.base.name); 1178 drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr); 1179 } 1180 drm_connector_list_iter_end(&conn_iter); 1181 1182 return 0; 1183 } 1184 1185 static ssize_t i915_displayport_test_active_write(struct file *file, 1186 const char __user *ubuf, 1187 size_t len, loff_t *offp) 1188 { 1189 char *input_buffer; 1190 int status = 0; 1191 struct drm_device *dev; 1192 struct drm_connector *connector; 1193 struct drm_connector_list_iter conn_iter; 1194 struct intel_dp *intel_dp; 1195 int val = 0; 1196 1197 dev = ((struct seq_file *)file->private_data)->private; 1198 1199 if (len == 0) 1200 return 0; 1201 1202 input_buffer = memdup_user_nul(ubuf, len); 1203 if (IS_ERR(input_buffer)) 1204 return PTR_ERR(input_buffer); 1205 1206 drm_dbg(&to_i915(dev)->drm, 1207 
"Copied %d bytes from user\n", (unsigned int)len); 1208 1209 drm_connector_list_iter_begin(dev, &conn_iter); 1210 drm_for_each_connector_iter(connector, &conn_iter) { 1211 struct intel_encoder *encoder; 1212 1213 if (connector->connector_type != 1214 DRM_MODE_CONNECTOR_DisplayPort) 1215 continue; 1216 1217 encoder = to_intel_encoder(connector->encoder); 1218 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 1219 continue; 1220 1221 if (encoder && connector->status == connector_status_connected) { 1222 intel_dp = enc_to_intel_dp(encoder); 1223 status = kstrtoint(input_buffer, 10, &val); 1224 if (status < 0) 1225 break; 1226 drm_dbg(&to_i915(dev)->drm, 1227 "Got %d for test active\n", val); 1228 /* To prevent erroneous activation of the compliance 1229 * testing code, only accept an actual value of 1 here 1230 */ 1231 if (val == 1) 1232 intel_dp->compliance.test_active = true; 1233 else 1234 intel_dp->compliance.test_active = false; 1235 } 1236 } 1237 drm_connector_list_iter_end(&conn_iter); 1238 kfree(input_buffer); 1239 if (status < 0) 1240 return status; 1241 1242 *offp += len; 1243 return len; 1244 } 1245 1246 static int i915_displayport_test_active_show(struct seq_file *m, void *data) 1247 { 1248 struct drm_i915_private *dev_priv = m->private; 1249 struct drm_device *dev = &dev_priv->drm; 1250 struct drm_connector *connector; 1251 struct drm_connector_list_iter conn_iter; 1252 struct intel_dp *intel_dp; 1253 1254 drm_connector_list_iter_begin(dev, &conn_iter); 1255 drm_for_each_connector_iter(connector, &conn_iter) { 1256 struct intel_encoder *encoder; 1257 1258 if (connector->connector_type != 1259 DRM_MODE_CONNECTOR_DisplayPort) 1260 continue; 1261 1262 encoder = to_intel_encoder(connector->encoder); 1263 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 1264 continue; 1265 1266 if (encoder && connector->status == connector_status_connected) { 1267 intel_dp = enc_to_intel_dp(encoder); 1268 if (intel_dp->compliance.test_active) 1269 seq_puts(m, "1"); 1270 else 1271 seq_puts(m, "0"); 1272 } else 1273 seq_puts(m, "0"); 1274 } 1275 drm_connector_list_iter_end(&conn_iter); 1276 1277 return 0; 1278 } 1279 1280 static int i915_displayport_test_active_open(struct inode *inode, 1281 struct file *file) 1282 { 1283 return single_open(file, i915_displayport_test_active_show, 1284 inode->i_private); 1285 } 1286 1287 static const struct file_operations i915_displayport_test_active_fops = { 1288 .owner = THIS_MODULE, 1289 .open = i915_displayport_test_active_open, 1290 .read = seq_read, 1291 .llseek = seq_lseek, 1292 .release = single_release, 1293 .write = i915_displayport_test_active_write 1294 }; 1295 1296 static int i915_displayport_test_data_show(struct seq_file *m, void *data) 1297 { 1298 struct drm_i915_private *dev_priv = m->private; 1299 struct drm_device *dev = &dev_priv->drm; 1300 struct drm_connector *connector; 1301 struct drm_connector_list_iter conn_iter; 1302 struct intel_dp *intel_dp; 1303 1304 drm_connector_list_iter_begin(dev, &conn_iter); 1305 drm_for_each_connector_iter(connector, &conn_iter) { 1306 struct intel_encoder *encoder; 1307 1308 if (connector->connector_type != 1309 DRM_MODE_CONNECTOR_DisplayPort) 1310 continue; 1311 1312 encoder = to_intel_encoder(connector->encoder); 1313 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 1314 continue; 1315 1316 if (encoder && connector->status == connector_status_connected) { 1317 intel_dp = enc_to_intel_dp(encoder); 1318 if (intel_dp->compliance.test_type == 1319 DP_TEST_LINK_EDID_READ) 1320 seq_printf(m, "%lx", 1321 
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			} else if (intel_dp->compliance.test_type ==
				   DP_TEST_LINK_PHY_TEST_PATTERN) {
				seq_printf(m, "pattern: %d\n",
					   intel_dp->compliance.test_data.phytest.phy_pattern);
				seq_printf(m, "Number of lanes: %d\n",
					   intel_dp->compliance.test_data.phytest.num_lanes);
				seq_printf(m, "Link Rate: %d\n",
					   intel_dp->compliance.test_data.phytest.link_rate);
				seq_printf(m, "level: %02x\n",
					   intel_dp->train_set[0]);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);

static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);

static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (DISPLAY_VER(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   str_yes_no(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
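
/*
 * i915_hpd_storm_ctl: reading reports the current HPD storm threshold and
 * whether a storm has been detected; writing either a new threshold or
 * "reset" (to restore HPD_STORM_DEFAULT_THRESHOLD) also clears the per-pin
 * storm statistics.
 */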
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};

static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   str_yes_no(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
		    new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};

static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->hw.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_dbg(&dev_priv->drm,
			"Manually %sactivating DRRS\n", val ? "" : "de");
"" : "de"); 1801 1802 if (val) 1803 intel_drrs_activate(crtc_state); 1804 else 1805 intel_drrs_deactivate(crtc_state); 1806 1807 out: 1808 drm_modeset_unlock(&crtc->base.mutex); 1809 if (ret) 1810 return ret; 1811 } 1812 1813 return 0; 1814 } 1815 1816 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n"); 1817 1818 static ssize_t 1819 i915_fifo_underrun_reset_write(struct file *filp, 1820 const char __user *ubuf, 1821 size_t cnt, loff_t *ppos) 1822 { 1823 struct drm_i915_private *dev_priv = filp->private_data; 1824 struct intel_crtc *crtc; 1825 struct drm_device *dev = &dev_priv->drm; 1826 int ret; 1827 bool reset; 1828 1829 ret = kstrtobool_from_user(ubuf, cnt, &reset); 1830 if (ret) 1831 return ret; 1832 1833 if (!reset) 1834 return cnt; 1835 1836 for_each_intel_crtc(dev, crtc) { 1837 struct drm_crtc_commit *commit; 1838 struct intel_crtc_state *crtc_state; 1839 1840 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); 1841 if (ret) 1842 return ret; 1843 1844 crtc_state = to_intel_crtc_state(crtc->base.state); 1845 commit = crtc_state->uapi.commit; 1846 if (commit) { 1847 ret = wait_for_completion_interruptible(&commit->hw_done); 1848 if (!ret) 1849 ret = wait_for_completion_interruptible(&commit->flip_done); 1850 } 1851 1852 if (!ret && crtc_state->hw.active) { 1853 drm_dbg_kms(&dev_priv->drm, 1854 "Re-arming FIFO underruns on pipe %c\n", 1855 pipe_name(crtc->pipe)); 1856 1857 intel_crtc_arm_fifo_underrun(crtc, crtc_state); 1858 } 1859 1860 drm_modeset_unlock(&crtc->base.mutex); 1861 1862 if (ret) 1863 return ret; 1864 } 1865 1866 intel_fbc_reset_underrun(dev_priv); 1867 1868 return cnt; 1869 } 1870 1871 static const struct file_operations i915_fifo_underrun_reset_ops = { 1872 .owner = THIS_MODULE, 1873 .open = simple_open, 1874 .write = i915_fifo_underrun_reset_write, 1875 .llseek = default_llseek, 1876 }; 1877 1878 static const struct drm_info_list intel_display_debugfs_list[] = { 1879 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 1880 {"i915_ips_status", i915_ips_status, 0}, 1881 {"i915_sr_status", i915_sr_status, 0}, 1882 {"i915_opregion", i915_opregion, 0}, 1883 {"i915_vbt", i915_vbt, 0}, 1884 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 1885 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 1886 {"i915_power_domain_info", i915_power_domain_info, 0}, 1887 {"i915_display_info", i915_display_info, 0}, 1888 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 1889 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 1890 {"i915_ddb_info", i915_ddb_info, 0}, 1891 {"i915_drrs_status", i915_drrs_status, 0}, 1892 {"i915_lpsp_status", i915_lpsp_status, 0}, 1893 }; 1894 1895 static const struct { 1896 const char *name; 1897 const struct file_operations *fops; 1898 } intel_display_debugfs_files[] = { 1899 {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, 1900 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 1901 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 1902 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 1903 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 1904 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 1905 {"i915_dp_test_active", &i915_displayport_test_active_fops}, 1906 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, 1907 {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops}, 1908 {"i915_ipc_status", &i915_ipc_status_fops}, 1909 {"i915_drrs_ctl", &i915_drrs_ctl_fops}, 1910 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}, 1911 }; 1912 1913 void 
void intel_display_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
		debugfs_create_file(intel_display_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    intel_display_debugfs_files[i].fops);
	}

	drm_debugfs_create_files(intel_display_debugfs_list,
				 ARRAY_SIZE(intel_display_debugfs_list),
				 minor->debugfs_root, minor);

	intel_dmc_debugfs_register(i915);
	intel_fbc_debugfs_register(i915);
}

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		intel_attached_dp(to_intel_connector(connector));

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->pps.panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->pps.panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->pps.backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->pps.backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);

static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int ret;

	ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
	if (ret)
		return ret;

	if (!connector->encoder || connector->status != connector_status_connected) {
		ret = -ENODEV;
		goto out;
	}

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

out:
	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);

static int i915_psr_status_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		intel_attached_dp(to_intel_connector(connector));

	return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);

static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_encoder *encoder;
	bool lpsp_capable = false;

	encoder = intel_attached_encoder(to_intel_connector(connector));
	if (!encoder)
		return -ENODEV;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	if (DISPLAY_VER(i915) >= 13)
		lpsp_capable = encoder->port <= PORT_B;
	else if (DISPLAY_VER(i915) >= 12)
		/*
		 * TGL can actually drive LPSP on ports up to DDI_C, but no
		 * TGL SKU has DDI_C physically connected, and the driver
		 * does not initialize DDI_C on gen12 either.
		 */
		lpsp_capable = encoder->port <= PORT_B;
	else if (DISPLAY_VER(i915) == 11)
		lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
				connector->connector_type == DRM_MODE_CONNECTOR_eDP);
	else if (IS_DISPLAY_VER(i915, 9, 10))
		lpsp_capable = (encoder->port == PORT_A &&
				(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
				 connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
				 connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP;

	seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
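
/*
 * Per-connector "i915_dsc_fec_support" file: reading it reports whether DSC
 * is enabled on the attached CRTC, whether the sink advertises DSC (and, for
 * non-eDP, FEC) support, and whether DSC is currently being forced; writing
 * a boolean toggles intel_dp->force_dsc_en.
 */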
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = intel_attached_dp(to_intel_connector(connector));
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   str_yes_no(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   str_yes_no(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   str_yes_no(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   str_yes_no(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	if (len == 0)
		return 0;

	drm_dbg(&i915->drm,
		"Copied %zu bytes from user to force DSC\n", len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
		(dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};

static int i915_dsc_bpp_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	int ret;

	if (!encoder)
		return -ENODEV;

	ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
	if (ret)
		return ret;

	crtc = connector->state->crtc;
	if (connector->status != connector_status_connected || !crtc) {
		ret = -ENODEV;
		goto out;
	}

	crtc_state = to_intel_crtc_state(crtc->state);
	seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp);

out:	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	return ret;
}

static ssize_t i915_dsc_bpp_write(struct file *file,
				  const char __user *ubuf,
				  size_t len, loff_t *offp)
{
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int dsc_bpp = 0;
	int ret;

	ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp);
	if (ret < 0)
		return ret;

	intel_dp->force_dsc_bpp = dsc_bpp;
	*offp += len;

	return len;
}

static int i915_dsc_bpp_open(struct inode *inode,
			     struct file *file)
{
	return single_open(file, i915_dsc_bpp_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_bpp_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_bpp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_bpp_write
};
/**
 * intel_connector_debugfs_add - add i915 specific connector debugfs files
 * @intel_connector: pointer to a registered intel_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 */
void intel_connector_debugfs_add(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return;

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (HAS_PSR(dev_priv) &&
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_psr_status", 0444, root,
				    connector, &i915_psr_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (DISPLAY_VER(dev_priv) >= 11 &&
	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
	      !to_intel_connector(connector)->mst_port) ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		debugfs_create_file("i915_dsc_fec_support", 0644, root,
				    connector, &i915_dsc_fec_support_fops);

		debugfs_create_file("i915_dsc_bpp", 0644, root,
				    connector, &i915_dsc_bpp_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
	    connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
		debugfs_create_file("i915_lpsp_capability", 0444, root,
				    connector, &i915_lpsp_capability_fops);
}

/**
 * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
 * @crtc: pointer to a drm_crtc
 *
 * Failure to add debugfs entries should generally be ignored.
 */
void intel_crtc_debugfs_add(struct drm_crtc *crtc)
{
	if (!crtc->debugfs_entry)
		return;

	crtc_updates_add(crtc);
	intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc));
}