1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2020 Intel Corporation 4 */ 5 6 #include <drm/drm_debugfs.h> 7 #include <drm/drm_fourcc.h> 8 9 #include "i915_debugfs.h" 10 #include "intel_csr.h" 11 #include "intel_display_debugfs.h" 12 #include "intel_display_power.h" 13 #include "intel_display_types.h" 14 #include "intel_dp.h" 15 #include "intel_fbc.h" 16 #include "intel_hdcp.h" 17 #include "intel_hdmi.h" 18 #include "intel_pm.h" 19 #include "intel_psr.h" 20 #include "intel_sideband.h" 21 #include "intel_sprite.h" 22 23 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) 24 { 25 return to_i915(node->minor->dev); 26 } 27 28 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) 29 { 30 struct drm_i915_private *dev_priv = node_to_i915(m->private); 31 32 seq_printf(m, "FB tracking busy bits: 0x%08x\n", 33 dev_priv->fb_tracking.busy_bits); 34 35 seq_printf(m, "FB tracking flip bits: 0x%08x\n", 36 dev_priv->fb_tracking.flip_bits); 37 38 return 0; 39 } 40 41 static int i915_fbc_status(struct seq_file *m, void *unused) 42 { 43 struct drm_i915_private *dev_priv = node_to_i915(m->private); 44 struct intel_fbc *fbc = &dev_priv->fbc; 45 intel_wakeref_t wakeref; 46 47 if (!HAS_FBC(dev_priv)) 48 return -ENODEV; 49 50 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 51 mutex_lock(&fbc->lock); 52 53 if (intel_fbc_is_active(dev_priv)) 54 seq_puts(m, "FBC enabled\n"); 55 else 56 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); 57 58 if (intel_fbc_is_active(dev_priv)) { 59 u32 mask; 60 61 if (INTEL_GEN(dev_priv) >= 8) 62 mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK; 63 else if (INTEL_GEN(dev_priv) >= 7) 64 mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK; 65 else if (INTEL_GEN(dev_priv) >= 5) 66 mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK; 67 else if (IS_G4X(dev_priv)) 68 mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK; 69 
else 70 mask = intel_de_read(dev_priv, FBC_STATUS) & 71 (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); 72 73 seq_printf(m, "Compressing: %s\n", yesno(mask)); 74 } 75 76 mutex_unlock(&fbc->lock); 77 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 78 79 return 0; 80 } 81 82 static int i915_fbc_false_color_get(void *data, u64 *val) 83 { 84 struct drm_i915_private *dev_priv = data; 85 86 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 87 return -ENODEV; 88 89 *val = dev_priv->fbc.false_color; 90 91 return 0; 92 } 93 94 static int i915_fbc_false_color_set(void *data, u64 val) 95 { 96 struct drm_i915_private *dev_priv = data; 97 u32 reg; 98 99 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 100 return -ENODEV; 101 102 mutex_lock(&dev_priv->fbc.lock); 103 104 reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL); 105 dev_priv->fbc.false_color = val; 106 107 intel_de_write(dev_priv, ILK_DPFC_CONTROL, 108 val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR)); 109 110 mutex_unlock(&dev_priv->fbc.lock); 111 return 0; 112 } 113 114 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, 115 i915_fbc_false_color_get, i915_fbc_false_color_set, 116 "%llu\n"); 117 118 static int i915_ips_status(struct seq_file *m, void *unused) 119 { 120 struct drm_i915_private *dev_priv = node_to_i915(m->private); 121 intel_wakeref_t wakeref; 122 123 if (!HAS_IPS(dev_priv)) 124 return -ENODEV; 125 126 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 127 128 seq_printf(m, "Enabled by kernel parameter: %s\n", 129 yesno(dev_priv->params.enable_ips)); 130 131 if (INTEL_GEN(dev_priv) >= 8) { 132 seq_puts(m, "Currently: unknown\n"); 133 } else { 134 if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE) 135 seq_puts(m, "Currently: enabled\n"); 136 else 137 seq_puts(m, "Currently: disabled\n"); 138 } 139 140 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 141 142 return 0; 143 } 144 145 static int i915_sr_status(struct seq_file *m, void *unused) 146 { 147 struct drm_i915_private 
*dev_priv = node_to_i915(m->private); 148 intel_wakeref_t wakeref; 149 bool sr_enabled = false; 150 151 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 152 153 if (INTEL_GEN(dev_priv) >= 9) 154 /* no global SR status; inspect per-plane WM */; 155 else if (HAS_PCH_SPLIT(dev_priv)) 156 sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN; 157 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) || 158 IS_I945G(dev_priv) || IS_I945GM(dev_priv)) 159 sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN; 160 else if (IS_I915GM(dev_priv)) 161 sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN; 162 else if (IS_PINEVIEW(dev_priv)) 163 sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 164 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 165 sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 166 167 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); 168 169 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); 170 171 return 0; 172 } 173 174 static int i915_opregion(struct seq_file *m, void *unused) 175 { 176 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; 177 178 if (opregion->header) 179 seq_write(m, opregion->header, OPREGION_SIZE); 180 181 return 0; 182 } 183 184 static int i915_vbt(struct seq_file *m, void *unused) 185 { 186 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; 187 188 if (opregion->vbt) 189 seq_write(m, opregion->vbt, opregion->vbt_size); 190 191 return 0; 192 } 193 194 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 195 { 196 struct drm_i915_private *dev_priv = node_to_i915(m->private); 197 struct drm_device *dev = &dev_priv->drm; 198 struct intel_framebuffer *fbdev_fb = NULL; 199 struct drm_framebuffer *drm_fb; 200 201 #ifdef CONFIG_DRM_FBDEV_EMULATION 202 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) { 203 fbdev_fb = 
to_intel_framebuffer(dev_priv->fbdev->helper.fb); 204 205 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 206 fbdev_fb->base.width, 207 fbdev_fb->base.height, 208 fbdev_fb->base.format->depth, 209 fbdev_fb->base.format->cpp[0] * 8, 210 fbdev_fb->base.modifier, 211 drm_framebuffer_read_refcount(&fbdev_fb->base)); 212 i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base)); 213 seq_putc(m, '\n'); 214 } 215 #endif 216 217 mutex_lock(&dev->mode_config.fb_lock); 218 drm_for_each_fb(drm_fb, dev) { 219 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 220 if (fb == fbdev_fb) 221 continue; 222 223 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 224 fb->base.width, 225 fb->base.height, 226 fb->base.format->depth, 227 fb->base.format->cpp[0] * 8, 228 fb->base.modifier, 229 drm_framebuffer_read_refcount(&fb->base)); 230 i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base)); 231 seq_putc(m, '\n'); 232 } 233 mutex_unlock(&dev->mode_config.fb_lock); 234 235 return 0; 236 } 237 238 static int i915_psr_sink_status_show(struct seq_file *m, void *data) 239 { 240 u8 val; 241 static const char * const sink_status[] = { 242 "inactive", 243 "transition to active, capture and display", 244 "active, display from RFB", 245 "active, capture and display on sink device timings", 246 "transition to inactive, capture and display, timing re-sync", 247 "reserved", 248 "reserved", 249 "sink internal error", 250 }; 251 struct drm_connector *connector = m->private; 252 struct drm_i915_private *dev_priv = to_i915(connector->dev); 253 struct intel_dp *intel_dp = 254 intel_attached_dp(to_intel_connector(connector)); 255 int ret; 256 257 if (!CAN_PSR(dev_priv)) { 258 seq_puts(m, "PSR Unsupported\n"); 259 return -ENODEV; 260 } 261 262 if (connector->status != connector_status_connected) 263 return -ENODEV; 264 265 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val); 266 267 if (ret == 1) { 268 
const char *str = "unknown"; 269 270 val &= DP_PSR_SINK_STATE_MASK; 271 if (val < ARRAY_SIZE(sink_status)) 272 str = sink_status[val]; 273 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str); 274 } else { 275 return ret; 276 } 277 278 return 0; 279 } 280 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); 281 282 static void 283 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) 284 { 285 u32 val, status_val; 286 const char *status = "unknown"; 287 288 if (dev_priv->psr.psr2_enabled) { 289 static const char * const live_status[] = { 290 "IDLE", 291 "CAPTURE", 292 "CAPTURE_FS", 293 "SLEEP", 294 "BUFON_FW", 295 "ML_UP", 296 "SU_STANDBY", 297 "FAST_SLEEP", 298 "DEEP_SLEEP", 299 "BUF_ON", 300 "TG_ON" 301 }; 302 val = intel_de_read(dev_priv, 303 EDP_PSR2_STATUS(dev_priv->psr.transcoder)); 304 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> 305 EDP_PSR2_STATUS_STATE_SHIFT; 306 if (status_val < ARRAY_SIZE(live_status)) 307 status = live_status[status_val]; 308 } else { 309 static const char * const live_status[] = { 310 "IDLE", 311 "SRDONACK", 312 "SRDENT", 313 "BUFOFF", 314 "BUFON", 315 "AUXACK", 316 "SRDOFFACK", 317 "SRDENT_ON", 318 }; 319 val = intel_de_read(dev_priv, 320 EDP_PSR_STATUS(dev_priv->psr.transcoder)); 321 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> 322 EDP_PSR_STATUS_STATE_SHIFT; 323 if (status_val < ARRAY_SIZE(live_status)) 324 status = live_status[status_val]; 325 } 326 327 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); 328 } 329 330 static int i915_edp_psr_status(struct seq_file *m, void *data) 331 { 332 struct drm_i915_private *dev_priv = node_to_i915(m->private); 333 struct i915_psr *psr = &dev_priv->psr; 334 intel_wakeref_t wakeref; 335 const char *status; 336 bool enabled; 337 u32 val; 338 339 if (!HAS_PSR(dev_priv)) 340 return -ENODEV; 341 342 seq_printf(m, "Sink support: %s", yesno(psr->sink_support)); 343 if (psr->dp) 344 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]); 345 seq_puts(m, "\n"); 346 347 
if (!psr->sink_support) 348 return 0; 349 350 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 351 mutex_lock(&psr->lock); 352 353 if (psr->enabled) 354 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled"; 355 else 356 status = "disabled"; 357 seq_printf(m, "PSR mode: %s\n", status); 358 359 if (!psr->enabled) { 360 seq_printf(m, "PSR sink not reliable: %s\n", 361 yesno(psr->sink_not_reliable)); 362 363 goto unlock; 364 } 365 366 if (psr->psr2_enabled) { 367 val = intel_de_read(dev_priv, 368 EDP_PSR2_CTL(dev_priv->psr.transcoder)); 369 enabled = val & EDP_PSR2_ENABLE; 370 } else { 371 val = intel_de_read(dev_priv, 372 EDP_PSR_CTL(dev_priv->psr.transcoder)); 373 enabled = val & EDP_PSR_ENABLE; 374 } 375 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", 376 enableddisabled(enabled), val); 377 psr_source_status(dev_priv, m); 378 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", 379 psr->busy_frontbuffer_bits); 380 381 /* 382 * SKL+ Perf counter is reset to 0 everytime DC state is entered 383 */ 384 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 385 val = intel_de_read(dev_priv, 386 EDP_PSR_PERF_CNT(dev_priv->psr.transcoder)); 387 val &= EDP_PSR_PERF_CNT_MASK; 388 seq_printf(m, "Performance counter: %u\n", val); 389 } 390 391 if (psr->debug & I915_PSR_DEBUG_IRQ) { 392 seq_printf(m, "Last attempted entry at: %lld\n", 393 psr->last_entry_attempt); 394 seq_printf(m, "Last exit at: %lld\n", psr->last_exit); 395 } 396 397 if (psr->psr2_enabled) { 398 u32 su_frames_val[3]; 399 int frame; 400 401 /* 402 * Reading all 3 registers before hand to minimize crossing a 403 * frame boundary between register reads 404 */ 405 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { 406 val = intel_de_read(dev_priv, 407 PSR2_SU_STATUS(dev_priv->psr.transcoder, frame)); 408 su_frames_val[frame / 3] = val; 409 } 410 411 seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); 412 413 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { 414 u32 su_blocks; 415 416 su_blocks = 
su_frames_val[frame / 3] & 417 PSR2_SU_STATUS_MASK(frame); 418 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); 419 seq_printf(m, "%d\t%d\n", frame, su_blocks); 420 } 421 422 seq_printf(m, "PSR2 selective fetch: %s\n", 423 enableddisabled(psr->psr2_sel_fetch_enabled)); 424 } 425 426 unlock: 427 mutex_unlock(&psr->lock); 428 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 429 430 return 0; 431 } 432 433 static int 434 i915_edp_psr_debug_set(void *data, u64 val) 435 { 436 struct drm_i915_private *dev_priv = data; 437 intel_wakeref_t wakeref; 438 int ret; 439 440 if (!CAN_PSR(dev_priv)) 441 return -ENODEV; 442 443 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val); 444 445 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 446 447 ret = intel_psr_debug_set(dev_priv, val); 448 449 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 450 451 return ret; 452 } 453 454 static int 455 i915_edp_psr_debug_get(void *data, u64 *val) 456 { 457 struct drm_i915_private *dev_priv = data; 458 459 if (!CAN_PSR(dev_priv)) 460 return -ENODEV; 461 462 *val = READ_ONCE(dev_priv->psr.debug); 463 return 0; 464 } 465 466 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, 467 i915_edp_psr_debug_get, i915_edp_psr_debug_set, 468 "%llu\n"); 469 470 static int i915_power_domain_info(struct seq_file *m, void *unused) 471 { 472 struct drm_i915_private *dev_priv = node_to_i915(m->private); 473 struct i915_power_domains *power_domains = &dev_priv->power_domains; 474 int i; 475 476 mutex_lock(&power_domains->lock); 477 478 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 479 for (i = 0; i < power_domains->power_well_count; i++) { 480 struct i915_power_well *power_well; 481 enum intel_display_power_domain power_domain; 482 483 power_well = &power_domains->power_wells[i]; 484 seq_printf(m, "%-25s %d\n", power_well->desc->name, 485 power_well->count); 486 487 for_each_power_domain(power_domain, power_well->desc->domains) 488 seq_printf(m, " %-23s %d\n", 489 
intel_display_power_domain_str(power_domain), 490 power_domains->domain_use_count[power_domain]); 491 } 492 493 mutex_unlock(&power_domains->lock); 494 495 return 0; 496 } 497 498 static int i915_dmc_info(struct seq_file *m, void *unused) 499 { 500 struct drm_i915_private *dev_priv = node_to_i915(m->private); 501 intel_wakeref_t wakeref; 502 struct intel_csr *csr; 503 i915_reg_t dc5_reg, dc6_reg = {}; 504 505 if (!HAS_CSR(dev_priv)) 506 return -ENODEV; 507 508 csr = &dev_priv->csr; 509 510 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 511 512 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); 513 seq_printf(m, "path: %s\n", csr->fw_path); 514 515 if (!csr->dmc_payload) 516 goto out; 517 518 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 519 CSR_VERSION_MINOR(csr->version)); 520 521 if (INTEL_GEN(dev_priv) >= 12) { 522 if (IS_DGFX(dev_priv)) { 523 dc5_reg = DG1_DMC_DEBUG_DC5_COUNT; 524 } else { 525 dc5_reg = TGL_DMC_DEBUG_DC5_COUNT; 526 dc6_reg = TGL_DMC_DEBUG_DC6_COUNT; 527 } 528 529 /* 530 * NOTE: DMC_DEBUG3 is a general purpose reg. 531 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter 532 * reg for DC3CO debugging and validation, 533 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter. 534 */ 535 seq_printf(m, "DC3CO count: %d\n", 536 intel_de_read(dev_priv, DMC_DEBUG3)); 537 } else { 538 dc5_reg = IS_BROXTON(dev_priv) ? 
BXT_CSR_DC3_DC5_COUNT : 539 SKL_CSR_DC3_DC5_COUNT; 540 if (!IS_GEN9_LP(dev_priv)) 541 dc6_reg = SKL_CSR_DC5_DC6_COUNT; 542 } 543 544 seq_printf(m, "DC3 -> DC5 count: %d\n", 545 intel_de_read(dev_priv, dc5_reg)); 546 if (dc6_reg.reg) 547 seq_printf(m, "DC5 -> DC6 count: %d\n", 548 intel_de_read(dev_priv, dc6_reg)); 549 550 out: 551 seq_printf(m, "program base: 0x%08x\n", 552 intel_de_read(dev_priv, CSR_PROGRAM(0))); 553 seq_printf(m, "ssp base: 0x%08x\n", 554 intel_de_read(dev_priv, CSR_SSP_BASE)); 555 seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL)); 556 557 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 558 559 return 0; 560 } 561 562 static void intel_seq_print_mode(struct seq_file *m, int tabs, 563 const struct drm_display_mode *mode) 564 { 565 int i; 566 567 for (i = 0; i < tabs; i++) 568 seq_putc(m, '\t'); 569 570 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); 571 } 572 573 static void intel_encoder_info(struct seq_file *m, 574 struct intel_crtc *crtc, 575 struct intel_encoder *encoder) 576 { 577 struct drm_i915_private *dev_priv = node_to_i915(m->private); 578 struct drm_connector_list_iter conn_iter; 579 struct drm_connector *connector; 580 581 seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n", 582 encoder->base.base.id, encoder->base.name); 583 584 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 585 drm_for_each_connector_iter(connector, &conn_iter) { 586 const struct drm_connector_state *conn_state = 587 connector->state; 588 589 if (conn_state->best_encoder != &encoder->base) 590 continue; 591 592 seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n", 593 connector->base.id, connector->name); 594 } 595 drm_connector_list_iter_end(&conn_iter); 596 } 597 598 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 599 { 600 const struct drm_display_mode *mode = panel->fixed_mode; 601 602 seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); 603 } 604 605 static void intel_hdcp_info(struct 
seq_file *m, 606 struct intel_connector *intel_connector) 607 { 608 bool hdcp_cap, hdcp2_cap; 609 610 if (!intel_connector->hdcp.shim) { 611 seq_puts(m, "No Connector Support"); 612 goto out; 613 } 614 615 hdcp_cap = intel_hdcp_capable(intel_connector); 616 hdcp2_cap = intel_hdcp2_capable(intel_connector); 617 618 if (hdcp_cap) 619 seq_puts(m, "HDCP1.4 "); 620 if (hdcp2_cap) 621 seq_puts(m, "HDCP2.2 "); 622 623 if (!hdcp_cap && !hdcp2_cap) 624 seq_puts(m, "None"); 625 626 out: 627 seq_puts(m, "\n"); 628 } 629 630 static void intel_dp_info(struct seq_file *m, 631 struct intel_connector *intel_connector) 632 { 633 struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); 634 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 635 const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr; 636 637 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 638 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 639 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) 640 intel_panel_info(m, &intel_connector->panel); 641 642 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, 643 edid ? 
edid->data : NULL, &intel_dp->aux); 644 } 645 646 static void intel_dp_mst_info(struct seq_file *m, 647 struct intel_connector *intel_connector) 648 { 649 bool has_audio = intel_connector->port->has_audio; 650 651 seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); 652 } 653 654 static void intel_hdmi_info(struct seq_file *m, 655 struct intel_connector *intel_connector) 656 { 657 struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); 658 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder); 659 660 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); 661 } 662 663 static void intel_lvds_info(struct seq_file *m, 664 struct intel_connector *intel_connector) 665 { 666 intel_panel_info(m, &intel_connector->panel); 667 } 668 669 static void intel_connector_info(struct seq_file *m, 670 struct drm_connector *connector) 671 { 672 struct intel_connector *intel_connector = to_intel_connector(connector); 673 const struct drm_connector_state *conn_state = connector->state; 674 struct intel_encoder *encoder = 675 to_intel_encoder(conn_state->best_encoder); 676 const struct drm_display_mode *mode; 677 678 seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n", 679 connector->base.id, connector->name, 680 drm_get_connector_status_name(connector->status)); 681 682 if (connector->status == connector_status_disconnected) 683 return; 684 685 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 686 connector->display_info.width_mm, 687 connector->display_info.height_mm); 688 seq_printf(m, "\tsubpixel order: %s\n", 689 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 690 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); 691 692 if (!encoder) 693 return; 694 695 switch (connector->connector_type) { 696 case DRM_MODE_CONNECTOR_DisplayPort: 697 case DRM_MODE_CONNECTOR_eDP: 698 if (encoder->type == INTEL_OUTPUT_DP_MST) 699 intel_dp_mst_info(m, intel_connector); 700 else 701 intel_dp_info(m, 
intel_connector); 702 break; 703 case DRM_MODE_CONNECTOR_LVDS: 704 if (encoder->type == INTEL_OUTPUT_LVDS) 705 intel_lvds_info(m, intel_connector); 706 break; 707 case DRM_MODE_CONNECTOR_HDMIA: 708 if (encoder->type == INTEL_OUTPUT_HDMI || 709 encoder->type == INTEL_OUTPUT_DDI) 710 intel_hdmi_info(m, intel_connector); 711 break; 712 default: 713 break; 714 } 715 716 seq_puts(m, "\tHDCP version: "); 717 intel_hdcp_info(m, intel_connector); 718 719 seq_printf(m, "\tmodes:\n"); 720 list_for_each_entry(mode, &connector->modes, head) 721 intel_seq_print_mode(m, 2, mode); 722 } 723 724 static const char *plane_type(enum drm_plane_type type) 725 { 726 switch (type) { 727 case DRM_PLANE_TYPE_OVERLAY: 728 return "OVL"; 729 case DRM_PLANE_TYPE_PRIMARY: 730 return "PRI"; 731 case DRM_PLANE_TYPE_CURSOR: 732 return "CUR"; 733 /* 734 * Deliberately omitting default: to generate compiler warnings 735 * when a new drm_plane_type gets added. 736 */ 737 } 738 739 return "unknown"; 740 } 741 742 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation) 743 { 744 /* 745 * According to doc only one DRM_MODE_ROTATE_ is allowed but this 746 * will print them all to visualize if the values are misused 747 */ 748 snprintf(buf, bufsize, 749 "%s%s%s%s%s%s(0x%08x)", 750 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "", 751 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "", 752 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "", 753 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "", 754 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "", 755 (rotation & DRM_MODE_REFLECT_Y) ? 
"FLIPY " : "", 756 rotation); 757 } 758 759 static const char *plane_visibility(const struct intel_plane_state *plane_state) 760 { 761 if (plane_state->uapi.visible) 762 return "visible"; 763 764 if (plane_state->planar_slave) 765 return "planar-slave"; 766 767 return "hidden"; 768 } 769 770 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane) 771 { 772 const struct intel_plane_state *plane_state = 773 to_intel_plane_state(plane->base.state); 774 const struct drm_framebuffer *fb = plane_state->uapi.fb; 775 struct drm_format_name_buf format_name; 776 struct drm_rect src, dst; 777 char rot_str[48]; 778 779 src = drm_plane_state_src(&plane_state->uapi); 780 dst = drm_plane_state_dest(&plane_state->uapi); 781 782 if (fb) 783 drm_get_format_name(fb->format->format, &format_name); 784 785 plane_rotation(rot_str, sizeof(rot_str), 786 plane_state->uapi.rotation); 787 788 seq_printf(m, "\t\tuapi: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", 789 fb ? fb->base.id : 0, fb ? format_name.str : "n/a", 790 fb ? fb->modifier : 0, 791 fb ? fb->width : 0, fb ? fb->height : 0, 792 plane_visibility(plane_state), 793 DRM_RECT_FP_ARG(&src), 794 DRM_RECT_ARG(&dst), 795 rot_str); 796 797 if (plane_state->planar_linked_plane) 798 seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n", 799 plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name, 800 plane_state->planar_slave ? 
"slave" : "master"); 801 } 802 803 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane) 804 { 805 const struct intel_plane_state *plane_state = 806 to_intel_plane_state(plane->base.state); 807 const struct drm_framebuffer *fb = plane_state->hw.fb; 808 struct drm_format_name_buf format_name; 809 char rot_str[48]; 810 811 if (!fb) 812 return; 813 814 drm_get_format_name(fb->format->format, &format_name); 815 816 plane_rotation(rot_str, sizeof(rot_str), 817 plane_state->hw.rotation); 818 819 seq_printf(m, "\t\thw: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", 820 fb->base.id, format_name.str, 821 fb->modifier, fb->width, fb->height, 822 yesno(plane_state->uapi.visible), 823 DRM_RECT_FP_ARG(&plane_state->uapi.src), 824 DRM_RECT_ARG(&plane_state->uapi.dst), 825 rot_str); 826 } 827 828 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc) 829 { 830 struct drm_i915_private *dev_priv = node_to_i915(m->private); 831 struct intel_plane *plane; 832 833 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 834 seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n", 835 plane->base.base.id, plane->base.name, 836 plane_type(plane->base.type)); 837 intel_plane_uapi_info(m, plane); 838 intel_plane_hw_info(m, plane); 839 } 840 } 841 842 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) 843 { 844 const struct intel_crtc_state *crtc_state = 845 to_intel_crtc_state(crtc->base.state); 846 int num_scalers = crtc->num_scalers; 847 int i; 848 849 /* Not all platformas have a scaler */ 850 if (num_scalers) { 851 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 852 num_scalers, 853 crtc_state->scaler_state.scaler_users, 854 crtc_state->scaler_state.scaler_id); 855 856 for (i = 0; i < num_scalers; i++) { 857 const struct intel_scaler *sc = 858 &crtc_state->scaler_state.scalers[i]; 859 860 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 861 i, 
yesno(sc->in_use), sc->mode); 862 } 863 seq_puts(m, "\n"); 864 } else { 865 seq_puts(m, "\tNo scalers available on this platform\n"); 866 } 867 } 868 869 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) 870 static void crtc_updates_info(struct seq_file *m, 871 struct intel_crtc *crtc, 872 const char *hdr) 873 { 874 u64 count; 875 int row; 876 877 count = 0; 878 for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) 879 count += crtc->debug.vbl.times[row]; 880 seq_printf(m, "%sUpdates: %llu\n", hdr, count); 881 if (!count) 882 return; 883 884 for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) { 885 char columns[80] = " |"; 886 unsigned int x; 887 888 if (row & 1) { 889 const char *units; 890 891 if (row > 10) { 892 x = 1000000; 893 units = "ms"; 894 } else { 895 x = 1000; 896 units = "us"; 897 } 898 899 snprintf(columns, sizeof(columns), "%4ld%s |", 900 DIV_ROUND_CLOSEST(BIT(row + 9), x), units); 901 } 902 903 if (crtc->debug.vbl.times[row]) { 904 x = ilog2(crtc->debug.vbl.times[row]); 905 memset(columns + 8, '*', x); 906 columns[8 + x] = '\0'; 907 } 908 909 seq_printf(m, "%s%s\n", hdr, columns); 910 } 911 912 seq_printf(m, "%sMin update: %lluns\n", 913 hdr, crtc->debug.vbl.min); 914 seq_printf(m, "%sMax update: %lluns\n", 915 hdr, crtc->debug.vbl.max); 916 seq_printf(m, "%sAverage update: %lluns\n", 917 hdr, div64_u64(crtc->debug.vbl.sum, count)); 918 seq_printf(m, "%sOverruns > %uus: %u\n", 919 hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over); 920 } 921 922 static int crtc_updates_show(struct seq_file *m, void *data) 923 { 924 crtc_updates_info(m, m->private, ""); 925 return 0; 926 } 927 928 static int crtc_updates_open(struct inode *inode, struct file *file) 929 { 930 return single_open(file, crtc_updates_show, inode->i_private); 931 } 932 933 static ssize_t crtc_updates_write(struct file *file, 934 const char __user *ubuf, 935 size_t len, loff_t *offp) 936 { 937 struct seq_file *m = file->private_data; 938 struct intel_crtc *crtc = 
m->private; 939 940 /* May race with an update. Meh. */ 941 memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl)); 942 943 return len; 944 } 945 946 static const struct file_operations crtc_updates_fops = { 947 .owner = THIS_MODULE, 948 .open = crtc_updates_open, 949 .read = seq_read, 950 .llseek = seq_lseek, 951 .release = single_release, 952 .write = crtc_updates_write 953 }; 954 955 static void crtc_updates_add(struct drm_crtc *crtc) 956 { 957 debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry, 958 to_intel_crtc(crtc), &crtc_updates_fops); 959 } 960 961 #else 962 static void crtc_updates_info(struct seq_file *m, 963 struct intel_crtc *crtc, 964 const char *hdr) 965 { 966 } 967 968 static void crtc_updates_add(struct drm_crtc *crtc) 969 { 970 } 971 #endif 972 973 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) 974 { 975 struct drm_i915_private *dev_priv = node_to_i915(m->private); 976 const struct intel_crtc_state *crtc_state = 977 to_intel_crtc_state(crtc->base.state); 978 struct intel_encoder *encoder; 979 980 seq_printf(m, "[CRTC:%d:%s]:\n", 981 crtc->base.base.id, crtc->base.name); 982 983 seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n", 984 yesno(crtc_state->uapi.enable), 985 yesno(crtc_state->uapi.active), 986 DRM_MODE_ARG(&crtc_state->uapi.mode)); 987 988 if (crtc_state->hw.enable) { 989 seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n", 990 yesno(crtc_state->hw.active), 991 DRM_MODE_ARG(&crtc_state->hw.adjusted_mode)); 992 993 seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n", 994 crtc_state->pipe_src_w, crtc_state->pipe_src_h, 995 yesno(crtc_state->dither), crtc_state->pipe_bpp); 996 997 intel_scaler_info(m, crtc); 998 } 999 1000 if (crtc_state->bigjoiner) 1001 seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n", 1002 crtc_state->bigjoiner_linked_crtc->base.base.id, 1003 crtc_state->bigjoiner_linked_crtc->base.name, 1004 crtc_state->bigjoiner_slave ? 
"slave" : "master"); 1005 1006 for_each_intel_encoder_mask(&dev_priv->drm, encoder, 1007 crtc_state->uapi.encoder_mask) 1008 intel_encoder_info(m, crtc, encoder); 1009 1010 intel_plane_info(m, crtc); 1011 1012 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n", 1013 yesno(!crtc->cpu_fifo_underrun_disabled), 1014 yesno(!crtc->pch_fifo_underrun_disabled)); 1015 1016 crtc_updates_info(m, crtc, "\t"); 1017 } 1018 1019 static int i915_display_info(struct seq_file *m, void *unused) 1020 { 1021 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1022 struct drm_device *dev = &dev_priv->drm; 1023 struct intel_crtc *crtc; 1024 struct drm_connector *connector; 1025 struct drm_connector_list_iter conn_iter; 1026 intel_wakeref_t wakeref; 1027 1028 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 1029 1030 drm_modeset_lock_all(dev); 1031 1032 seq_printf(m, "CRTC info\n"); 1033 seq_printf(m, "---------\n"); 1034 for_each_intel_crtc(dev, crtc) 1035 intel_crtc_info(m, crtc); 1036 1037 seq_printf(m, "\n"); 1038 seq_printf(m, "Connector info\n"); 1039 seq_printf(m, "--------------\n"); 1040 drm_connector_list_iter_begin(dev, &conn_iter); 1041 drm_for_each_connector_iter(connector, &conn_iter) 1042 intel_connector_info(m, connector); 1043 drm_connector_list_iter_end(&conn_iter); 1044 1045 drm_modeset_unlock_all(dev); 1046 1047 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 1048 1049 return 0; 1050 } 1051 1052 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 1053 { 1054 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1055 struct drm_device *dev = &dev_priv->drm; 1056 int i; 1057 1058 drm_modeset_lock_all(dev); 1059 1060 seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n", 1061 dev_priv->dpll.ref_clks.nssc, 1062 dev_priv->dpll.ref_clks.ssc); 1063 1064 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { 1065 struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i]; 1066 1067 seq_printf(m, "DPLL%i: %s, id: 
%i\n", i, pll->info->name, 1068 pll->info->id); 1069 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", 1070 pll->state.crtc_mask, pll->active_mask, yesno(pll->on)); 1071 seq_printf(m, " tracked hardware state:\n"); 1072 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); 1073 seq_printf(m, " dpll_md: 0x%08x\n", 1074 pll->state.hw_state.dpll_md); 1075 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); 1076 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); 1077 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); 1078 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0); 1079 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1); 1080 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n", 1081 pll->state.hw_state.mg_refclkin_ctl); 1082 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n", 1083 pll->state.hw_state.mg_clktop2_coreclkctl1); 1084 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n", 1085 pll->state.hw_state.mg_clktop2_hsclkctl); 1086 seq_printf(m, " mg_pll_div0: 0x%08x\n", 1087 pll->state.hw_state.mg_pll_div0); 1088 seq_printf(m, " mg_pll_div1: 0x%08x\n", 1089 pll->state.hw_state.mg_pll_div1); 1090 seq_printf(m, " mg_pll_lf: 0x%08x\n", 1091 pll->state.hw_state.mg_pll_lf); 1092 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n", 1093 pll->state.hw_state.mg_pll_frac_lock); 1094 seq_printf(m, " mg_pll_ssc: 0x%08x\n", 1095 pll->state.hw_state.mg_pll_ssc); 1096 seq_printf(m, " mg_pll_bias: 0x%08x\n", 1097 pll->state.hw_state.mg_pll_bias); 1098 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n", 1099 pll->state.hw_state.mg_pll_tdc_coldst_bias); 1100 } 1101 drm_modeset_unlock_all(dev); 1102 1103 return 0; 1104 } 1105 1106 static int i915_ipc_status_show(struct seq_file *m, void *data) 1107 { 1108 struct drm_i915_private *dev_priv = m->private; 1109 1110 seq_printf(m, "Isochronous Priority Control: %s\n", 1111 yesno(dev_priv->ipc_enabled)); 1112 return 0; 1113 } 1114 1115 static int i915_ipc_status_open(struct inode *inode, 
				struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}

/*
 * Write handler for i915_ipc_status: parse a boolean from userspace and
 * enable/disable IPC under a runtime PM wakeref. Watermarks are only
 * recomputed on the next commit, hence the informational message.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			drm_info(&dev_priv->drm,
				 "Enabling IPC: WM will be proper only after next commit\n");
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}

static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};

/*
 * Dump the DDB (Data Buffer) allocation per pipe/plane. Only meaningful
 * on gen9+ where the skl watermark code tracks plane_ddb_y entries.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
			/* plane_id is 0-based; report 1-based like the hw docs */
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}

/*
 * Print DRRS (Display Refresh Rate Switching) state for one CRTC:
 * which connectors on it support seamless DRRS, and if DRRS is active,
 * the current high/low refresh-rate state under drrs->mutex.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		bool supported = false;

		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
		    drrs->type == SEAMLESS_DRRS_SUPPORT)
			supported = true;

		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
	}
	drm_connector_list_iter_end(&conn_iter);

	seq_puts(m, "\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Enabled: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
					 "\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m,
"DRRS_State: DRRS_HIGH_RR\n"); 1250 vrefresh = drm_mode_vrefresh(panel->fixed_mode); 1251 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { 1252 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); 1253 vrefresh = drm_mode_vrefresh(panel->downclock_mode); 1254 } else { 1255 seq_printf(m, "DRRS_State: Unknown(%d)\n", 1256 drrs->refresh_rate_type); 1257 mutex_unlock(&drrs->mutex); 1258 return; 1259 } 1260 seq_printf(m, "\t\tVrefresh: %d", vrefresh); 1261 1262 seq_puts(m, "\n\t\t"); 1263 mutex_unlock(&drrs->mutex); 1264 } else { 1265 /* DRRS not supported. Print the VBT parameter*/ 1266 seq_puts(m, "\tDRRS Enabled : No"); 1267 } 1268 seq_puts(m, "\n"); 1269 } 1270 1271 static int i915_drrs_status(struct seq_file *m, void *unused) 1272 { 1273 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1274 struct drm_device *dev = &dev_priv->drm; 1275 struct intel_crtc *intel_crtc; 1276 int active_crtc_cnt = 0; 1277 1278 drm_modeset_lock_all(dev); 1279 for_each_intel_crtc(dev, intel_crtc) { 1280 if (intel_crtc->base.state->active) { 1281 active_crtc_cnt++; 1282 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); 1283 1284 drrs_status_per_crtc(m, dev, intel_crtc); 1285 } 1286 } 1287 drm_modeset_unlock_all(dev); 1288 1289 if (!active_crtc_cnt) 1290 seq_puts(m, "No active crtc found\n"); 1291 1292 return 0; 1293 } 1294 1295 #define LPSP_STATUS(COND) (COND ? 
seq_puts(m, "LPSP: enabled\n") : \ 1296 seq_puts(m, "LPSP: disabled\n")) 1297 1298 static bool 1299 intel_lpsp_power_well_enabled(struct drm_i915_private *i915, 1300 enum i915_power_well_id power_well_id) 1301 { 1302 intel_wakeref_t wakeref; 1303 bool is_enabled; 1304 1305 wakeref = intel_runtime_pm_get(&i915->runtime_pm); 1306 is_enabled = intel_display_power_well_is_enabled(i915, 1307 power_well_id); 1308 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 1309 1310 return is_enabled; 1311 } 1312 1313 static int i915_lpsp_status(struct seq_file *m, void *unused) 1314 { 1315 struct drm_i915_private *i915 = node_to_i915(m->private); 1316 1317 switch (INTEL_GEN(i915)) { 1318 case 12: 1319 case 11: 1320 LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3)); 1321 break; 1322 case 10: 1323 case 9: 1324 LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2)); 1325 break; 1326 default: 1327 /* 1328 * Apart from HASWELL/BROADWELL other legacy platform doesn't 1329 * support lpsp. 
1330 */ 1331 if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 1332 LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL)); 1333 else 1334 seq_puts(m, "LPSP: not supported\n"); 1335 } 1336 1337 return 0; 1338 } 1339 1340 static int i915_dp_mst_info(struct seq_file *m, void *unused) 1341 { 1342 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1343 struct drm_device *dev = &dev_priv->drm; 1344 struct intel_encoder *intel_encoder; 1345 struct intel_digital_port *dig_port; 1346 struct drm_connector *connector; 1347 struct drm_connector_list_iter conn_iter; 1348 1349 drm_connector_list_iter_begin(dev, &conn_iter); 1350 drm_for_each_connector_iter(connector, &conn_iter) { 1351 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 1352 continue; 1353 1354 intel_encoder = intel_attached_encoder(to_intel_connector(connector)); 1355 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) 1356 continue; 1357 1358 dig_port = enc_to_dig_port(intel_encoder); 1359 if (!dig_port->dp.can_mst) 1360 continue; 1361 1362 seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n", 1363 dig_port->base.base.base.id, 1364 dig_port->base.base.name); 1365 drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr); 1366 } 1367 drm_connector_list_iter_end(&conn_iter); 1368 1369 return 0; 1370 } 1371 1372 static ssize_t i915_displayport_test_active_write(struct file *file, 1373 const char __user *ubuf, 1374 size_t len, loff_t *offp) 1375 { 1376 char *input_buffer; 1377 int status = 0; 1378 struct drm_device *dev; 1379 struct drm_connector *connector; 1380 struct drm_connector_list_iter conn_iter; 1381 struct intel_dp *intel_dp; 1382 int val = 0; 1383 1384 dev = ((struct seq_file *)file->private_data)->private; 1385 1386 if (len == 0) 1387 return 0; 1388 1389 input_buffer = memdup_user_nul(ubuf, len); 1390 if (IS_ERR(input_buffer)) 1391 return PTR_ERR(input_buffer); 1392 1393 drm_dbg(&to_i915(dev)->drm, 1394 "Copied %d bytes from user\n", (unsigned int)len); 1395 1396 
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			drm_dbg(&to_i915(dev)->drm,
				"Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = true;
			else
				intel_dp->compliance.test_active = false;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}

/*
 * Show whether DP compliance testing is armed; prints "1"/"0" per
 * connected DP connector (and "0" for disconnected ones).
 */
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};

/*
 * Dump per-connector DP compliance test data; the fields printed depend
 * on the test type (EDID read, video pattern, or PHY pattern).
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			} else if (intel_dp->compliance.test_type ==
				   DP_TEST_LINK_PHY_TEST_PATTERN) {
				seq_printf(m, "pattern: %d\n",
					   intel_dp->compliance.test_data.phytest.phy_pattern);
				seq_printf(m, "Number of lanes: %d\n",
					   intel_dp->compliance.test_data.phytest.num_lanes);
				seq_printf(m, "Link Rate: %d\n",
					   intel_dp->compliance.test_data.phytest.link_rate);
				seq_printf(m, "level: %02x\n",
					   intel_dp->train_set[0]);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);

/* Print the pending DP compliance test type per connected DP connector. */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);

/*
 * Print a watermark latency table. The number of levels is
 * platform-dependent; units differ per platform (see comment in body).
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		/* latency is in tenths of a microsecond at this point */
		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}

/* Primary-plane watermark latencies; gen9+ uses the unified skl table. */
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

/* Sprite-plane watermark latencies; gen9+ uses the unified skl table. */
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

/* Cursor watermark latencies; gen9+ uses the unified skl table. */
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	/* pre-gen5 non-G4X platforms have no programmable WM latencies */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

/*
 * Common write path for the three latency files: parse exactly
 * num_levels space-separated u16 values and store them into wm[] under
 * the modeset locks.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}


static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

/* Show the HPD storm threshold and whether a storm is being handled. */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	/* a pending reenable_work means a storm was detected and HPD is off */
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

/*
 * Write handler for i915_hpd_storm_ctl: accepts a decimal threshold or
 * the literal "reset". Updates the threshold, clears per-pin storm
 * counters under irq_lock, then re-enables HPD if a storm was active.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};

/* Report whether HPD short-pulse storm detection is enabled. */
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

/*
 * Write handler for i915_hpd_short_storm_ctl: accepts a boolean or
 * "reset" (which restores the platform default: enabled only when the
 * device lacks DP MST). Clears per-pin storm counters like the long
 * storm control does.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
		    new_state ?
"En" : "Dis"); 1935 1936 spin_lock_irq(&dev_priv->irq_lock); 1937 hotplug->hpd_short_storm_enabled = new_state; 1938 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 1939 for_each_hpd_pin(i) 1940 hotplug->stats[i].count = 0; 1941 spin_unlock_irq(&dev_priv->irq_lock); 1942 1943 /* Re-enable hpd immediately if we were in an irq storm */ 1944 flush_delayed_work(&dev_priv->hotplug.reenable_work); 1945 1946 return len; 1947 } 1948 1949 static const struct file_operations i915_hpd_short_storm_ctl_fops = { 1950 .owner = THIS_MODULE, 1951 .open = i915_hpd_short_storm_ctl_open, 1952 .read = seq_read, 1953 .llseek = seq_lseek, 1954 .release = single_release, 1955 .write = i915_hpd_short_storm_ctl_write, 1956 }; 1957 1958 static int i915_drrs_ctl_set(void *data, u64 val) 1959 { 1960 struct drm_i915_private *dev_priv = data; 1961 struct drm_device *dev = &dev_priv->drm; 1962 struct intel_crtc *crtc; 1963 1964 if (INTEL_GEN(dev_priv) < 7) 1965 return -ENODEV; 1966 1967 for_each_intel_crtc(dev, crtc) { 1968 struct drm_connector_list_iter conn_iter; 1969 struct intel_crtc_state *crtc_state; 1970 struct drm_connector *connector; 1971 struct drm_crtc_commit *commit; 1972 int ret; 1973 1974 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); 1975 if (ret) 1976 return ret; 1977 1978 crtc_state = to_intel_crtc_state(crtc->base.state); 1979 1980 if (!crtc_state->hw.active || 1981 !crtc_state->has_drrs) 1982 goto out; 1983 1984 commit = crtc_state->uapi.commit; 1985 if (commit) { 1986 ret = wait_for_completion_interruptible(&commit->hw_done); 1987 if (ret) 1988 goto out; 1989 } 1990 1991 drm_connector_list_iter_begin(dev, &conn_iter); 1992 drm_for_each_connector_iter(connector, &conn_iter) { 1993 struct intel_encoder *encoder; 1994 struct intel_dp *intel_dp; 1995 1996 if (!(crtc_state->uapi.connector_mask & 1997 drm_connector_mask(connector))) 1998 continue; 1999 2000 encoder = intel_attached_encoder(to_intel_connector(connector)); 2001 if 
(encoder->type != INTEL_OUTPUT_EDP) 2002 continue; 2003 2004 drm_dbg(&dev_priv->drm, 2005 "Manually %sabling DRRS. %llu\n", 2006 val ? "en" : "dis", val); 2007 2008 intel_dp = enc_to_intel_dp(encoder); 2009 if (val) 2010 intel_edp_drrs_enable(intel_dp, 2011 crtc_state); 2012 else 2013 intel_edp_drrs_disable(intel_dp, 2014 crtc_state); 2015 } 2016 drm_connector_list_iter_end(&conn_iter); 2017 2018 out: 2019 drm_modeset_unlock(&crtc->base.mutex); 2020 if (ret) 2021 return ret; 2022 } 2023 2024 return 0; 2025 } 2026 2027 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n"); 2028 2029 static ssize_t 2030 i915_fifo_underrun_reset_write(struct file *filp, 2031 const char __user *ubuf, 2032 size_t cnt, loff_t *ppos) 2033 { 2034 struct drm_i915_private *dev_priv = filp->private_data; 2035 struct intel_crtc *intel_crtc; 2036 struct drm_device *dev = &dev_priv->drm; 2037 int ret; 2038 bool reset; 2039 2040 ret = kstrtobool_from_user(ubuf, cnt, &reset); 2041 if (ret) 2042 return ret; 2043 2044 if (!reset) 2045 return cnt; 2046 2047 for_each_intel_crtc(dev, intel_crtc) { 2048 struct drm_crtc_commit *commit; 2049 struct intel_crtc_state *crtc_state; 2050 2051 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex); 2052 if (ret) 2053 return ret; 2054 2055 crtc_state = to_intel_crtc_state(intel_crtc->base.state); 2056 commit = crtc_state->uapi.commit; 2057 if (commit) { 2058 ret = wait_for_completion_interruptible(&commit->hw_done); 2059 if (!ret) 2060 ret = wait_for_completion_interruptible(&commit->flip_done); 2061 } 2062 2063 if (!ret && crtc_state->hw.active) { 2064 drm_dbg_kms(&dev_priv->drm, 2065 "Re-arming FIFO underruns on pipe %c\n", 2066 pipe_name(intel_crtc->pipe)); 2067 2068 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state); 2069 } 2070 2071 drm_modeset_unlock(&intel_crtc->base.mutex); 2072 2073 if (ret) 2074 return ret; 2075 } 2076 2077 ret = intel_fbc_reset_underrun(dev_priv); 2078 if (ret) 2079 return ret; 2080 2081 return 
cnt; 2082 } 2083 2084 static const struct file_operations i915_fifo_underrun_reset_ops = { 2085 .owner = THIS_MODULE, 2086 .open = simple_open, 2087 .write = i915_fifo_underrun_reset_write, 2088 .llseek = default_llseek, 2089 }; 2090 2091 static const struct drm_info_list intel_display_debugfs_list[] = { 2092 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 2093 {"i915_fbc_status", i915_fbc_status, 0}, 2094 {"i915_ips_status", i915_ips_status, 0}, 2095 {"i915_sr_status", i915_sr_status, 0}, 2096 {"i915_opregion", i915_opregion, 0}, 2097 {"i915_vbt", i915_vbt, 0}, 2098 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 2099 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 2100 {"i915_power_domain_info", i915_power_domain_info, 0}, 2101 {"i915_dmc_info", i915_dmc_info, 0}, 2102 {"i915_display_info", i915_display_info, 0}, 2103 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 2104 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 2105 {"i915_ddb_info", i915_ddb_info, 0}, 2106 {"i915_drrs_status", i915_drrs_status, 0}, 2107 {"i915_lpsp_status", i915_lpsp_status, 0}, 2108 }; 2109 2110 static const struct { 2111 const char *name; 2112 const struct file_operations *fops; 2113 } intel_display_debugfs_files[] = { 2114 {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, 2115 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 2116 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 2117 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 2118 {"i915_fbc_false_color", &i915_fbc_false_color_fops}, 2119 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 2120 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 2121 {"i915_dp_test_active", &i915_displayport_test_active_fops}, 2122 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, 2123 {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops}, 2124 {"i915_ipc_status", &i915_ipc_status_fops}, 2125 {"i915_drrs_ctl", &i915_drrs_ctl_fops}, 2126 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}, 
2127 }; 2128 2129 void intel_display_debugfs_register(struct drm_i915_private *i915) 2130 { 2131 struct drm_minor *minor = i915->drm.primary; 2132 int i; 2133 2134 for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) { 2135 debugfs_create_file(intel_display_debugfs_files[i].name, 2136 S_IRUGO | S_IWUSR, 2137 minor->debugfs_root, 2138 to_i915(minor->dev), 2139 intel_display_debugfs_files[i].fops); 2140 } 2141 2142 drm_debugfs_create_files(intel_display_debugfs_list, 2143 ARRAY_SIZE(intel_display_debugfs_list), 2144 minor->debugfs_root, minor); 2145 } 2146 2147 static int i915_panel_show(struct seq_file *m, void *data) 2148 { 2149 struct drm_connector *connector = m->private; 2150 struct intel_dp *intel_dp = 2151 intel_attached_dp(to_intel_connector(connector)); 2152 2153 if (connector->status != connector_status_connected) 2154 return -ENODEV; 2155 2156 seq_printf(m, "Panel power up delay: %d\n", 2157 intel_dp->pps.panel_power_up_delay); 2158 seq_printf(m, "Panel power down delay: %d\n", 2159 intel_dp->pps.panel_power_down_delay); 2160 seq_printf(m, "Backlight on delay: %d\n", 2161 intel_dp->pps.backlight_on_delay); 2162 seq_printf(m, "Backlight off delay: %d\n", 2163 intel_dp->pps.backlight_off_delay); 2164 2165 return 0; 2166 } 2167 DEFINE_SHOW_ATTRIBUTE(i915_panel); 2168 2169 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) 2170 { 2171 struct drm_connector *connector = m->private; 2172 struct intel_connector *intel_connector = to_intel_connector(connector); 2173 2174 if (connector->status != connector_status_connected) 2175 return -ENODEV; 2176 2177 seq_printf(m, "%s:%d HDCP version: ", connector->name, 2178 connector->base.id); 2179 intel_hdcp_info(m, intel_connector); 2180 2181 return 0; 2182 } 2183 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); 2184 2185 #define LPSP_CAPABLE(COND) (COND ? 
seq_puts(m, "LPSP: capable\n") : \ 2186 seq_puts(m, "LPSP: incapable\n")) 2187 2188 static int i915_lpsp_capability_show(struct seq_file *m, void *data) 2189 { 2190 struct drm_connector *connector = m->private; 2191 struct drm_i915_private *i915 = to_i915(connector->dev); 2192 struct intel_encoder *encoder; 2193 2194 encoder = intel_attached_encoder(to_intel_connector(connector)); 2195 if (!encoder) 2196 return -ENODEV; 2197 2198 if (connector->status != connector_status_connected) 2199 return -ENODEV; 2200 2201 switch (INTEL_GEN(i915)) { 2202 case 12: 2203 /* 2204 * Actually TGL can drive LPSP on port till DDI_C 2205 * but there is no physical connected DDI_C on TGL sku's, 2206 * even driver is not initilizing DDI_C port for gen12. 2207 */ 2208 LPSP_CAPABLE(encoder->port <= PORT_B); 2209 break; 2210 case 11: 2211 LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI || 2212 connector->connector_type == DRM_MODE_CONNECTOR_eDP); 2213 break; 2214 case 10: 2215 case 9: 2216 LPSP_CAPABLE(encoder->port == PORT_A && 2217 (connector->connector_type == DRM_MODE_CONNECTOR_DSI || 2218 connector->connector_type == DRM_MODE_CONNECTOR_eDP || 2219 connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)); 2220 break; 2221 default: 2222 if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 2223 LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP); 2224 } 2225 2226 return 0; 2227 } 2228 DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability); 2229 2230 static int i915_dsc_fec_support_show(struct seq_file *m, void *data) 2231 { 2232 struct drm_connector *connector = m->private; 2233 struct drm_device *dev = connector->dev; 2234 struct drm_crtc *crtc; 2235 struct intel_dp *intel_dp; 2236 struct drm_modeset_acquire_ctx ctx; 2237 struct intel_crtc_state *crtc_state = NULL; 2238 int ret = 0; 2239 bool try_again = false; 2240 2241 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 2242 2243 do { 2244 try_again = false; 2245 ret = 
drm_modeset_lock(&dev->mode_config.connection_mutex, 2246 &ctx); 2247 if (ret) { 2248 if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) { 2249 try_again = true; 2250 continue; 2251 } 2252 break; 2253 } 2254 crtc = connector->state->crtc; 2255 if (connector->status != connector_status_connected || !crtc) { 2256 ret = -ENODEV; 2257 break; 2258 } 2259 ret = drm_modeset_lock(&crtc->mutex, &ctx); 2260 if (ret == -EDEADLK) { 2261 ret = drm_modeset_backoff(&ctx); 2262 if (!ret) { 2263 try_again = true; 2264 continue; 2265 } 2266 break; 2267 } else if (ret) { 2268 break; 2269 } 2270 intel_dp = intel_attached_dp(to_intel_connector(connector)); 2271 crtc_state = to_intel_crtc_state(crtc->state); 2272 seq_printf(m, "DSC_Enabled: %s\n", 2273 yesno(crtc_state->dsc.compression_enable)); 2274 seq_printf(m, "DSC_Sink_Support: %s\n", 2275 yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd))); 2276 seq_printf(m, "Force_DSC_Enable: %s\n", 2277 yesno(intel_dp->force_dsc_en)); 2278 if (!intel_dp_is_edp(intel_dp)) 2279 seq_printf(m, "FEC_Sink_Support: %s\n", 2280 yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable))); 2281 } while (try_again); 2282 2283 drm_modeset_drop_locks(&ctx); 2284 drm_modeset_acquire_fini(&ctx); 2285 2286 return ret; 2287 } 2288 2289 static ssize_t i915_dsc_fec_support_write(struct file *file, 2290 const char __user *ubuf, 2291 size_t len, loff_t *offp) 2292 { 2293 bool dsc_enable = false; 2294 int ret; 2295 struct drm_connector *connector = 2296 ((struct seq_file *)file->private_data)->private; 2297 struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); 2298 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2299 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2300 2301 if (len == 0) 2302 return 0; 2303 2304 drm_dbg(&i915->drm, 2305 "Copied %zu bytes from user to force DSC\n", len); 2306 2307 ret = kstrtobool_from_user(ubuf, len, &dsc_enable); 2308 if (ret < 0) 2309 return ret; 2310 2311 drm_dbg(&i915->drm, "Got %s 
for DSC Enable\n", 2312 (dsc_enable) ? "true" : "false"); 2313 intel_dp->force_dsc_en = dsc_enable; 2314 2315 *offp += len; 2316 return len; 2317 } 2318 2319 static int i915_dsc_fec_support_open(struct inode *inode, 2320 struct file *file) 2321 { 2322 return single_open(file, i915_dsc_fec_support_show, 2323 inode->i_private); 2324 } 2325 2326 static const struct file_operations i915_dsc_fec_support_fops = { 2327 .owner = THIS_MODULE, 2328 .open = i915_dsc_fec_support_open, 2329 .read = seq_read, 2330 .llseek = seq_lseek, 2331 .release = single_release, 2332 .write = i915_dsc_fec_support_write 2333 }; 2334 2335 /** 2336 * intel_connector_debugfs_add - add i915 specific connector debugfs files 2337 * @connector: pointer to a registered drm_connector 2338 * 2339 * Cleanup will be done by drm_connector_unregister() through a call to 2340 * drm_debugfs_connector_remove(). 2341 * 2342 * Returns 0 on success, negative error codes on error. 2343 */ 2344 int intel_connector_debugfs_add(struct drm_connector *connector) 2345 { 2346 struct dentry *root = connector->debugfs_entry; 2347 struct drm_i915_private *dev_priv = to_i915(connector->dev); 2348 2349 /* The connector must have been registered beforehands. 
*/ 2350 if (!root) 2351 return -ENODEV; 2352 2353 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 2354 debugfs_create_file("i915_panel_timings", S_IRUGO, root, 2355 connector, &i915_panel_fops); 2356 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root, 2357 connector, &i915_psr_sink_status_fops); 2358 } 2359 2360 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || 2361 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || 2362 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) { 2363 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root, 2364 connector, &i915_hdcp_sink_capability_fops); 2365 } 2366 2367 if (INTEL_GEN(dev_priv) >= 10 && 2368 ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && 2369 !to_intel_connector(connector)->mst_port) || 2370 connector->connector_type == DRM_MODE_CONNECTOR_eDP)) 2371 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root, 2372 connector, &i915_dsc_fec_support_fops); 2373 2374 /* Legacy panels doesn't lpsp on any platform */ 2375 if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) || 2376 IS_BROADWELL(dev_priv)) && 2377 (connector->connector_type == DRM_MODE_CONNECTOR_DSI || 2378 connector->connector_type == DRM_MODE_CONNECTOR_eDP || 2379 connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || 2380 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || 2381 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) 2382 debugfs_create_file("i915_lpsp_capability", 0444, root, 2383 connector, &i915_lpsp_capability_fops); 2384 2385 return 0; 2386 } 2387 2388 /** 2389 * intel_crtc_debugfs_add - add i915 specific crtc debugfs files 2390 * @crtc: pointer to a drm_crtc 2391 * 2392 * Returns 0 on success, negative error codes on error. 2393 * 2394 * Failure to add debugfs entries should generally be ignored. 
2395 */ 2396 int intel_crtc_debugfs_add(struct drm_crtc *crtc) 2397 { 2398 if (!crtc->debugfs_entry) 2399 return -ENODEV; 2400 2401 crtc_updates_add(crtc); 2402 return 0; 2403 } 2404