1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2020 Intel Corporation 4 */ 5 6 #include <drm/drm_debugfs.h> 7 #include <drm/drm_fourcc.h> 8 9 #include "i915_debugfs.h" 10 #include "intel_csr.h" 11 #include "intel_display_debugfs.h" 12 #include "intel_display_power.h" 13 #include "intel_display_types.h" 14 #include "intel_dp.h" 15 #include "intel_fbc.h" 16 #include "intel_hdcp.h" 17 #include "intel_hdmi.h" 18 #include "intel_pm.h" 19 #include "intel_psr.h" 20 #include "intel_sideband.h" 21 #include "intel_sprite.h" 22 23 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) 24 { 25 return to_i915(node->minor->dev); 26 } 27 28 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) 29 { 30 struct drm_i915_private *dev_priv = node_to_i915(m->private); 31 32 seq_printf(m, "FB tracking busy bits: 0x%08x\n", 33 dev_priv->fb_tracking.busy_bits); 34 35 seq_printf(m, "FB tracking flip bits: 0x%08x\n", 36 dev_priv->fb_tracking.flip_bits); 37 38 return 0; 39 } 40 41 static int i915_fbc_status(struct seq_file *m, void *unused) 42 { 43 struct drm_i915_private *dev_priv = node_to_i915(m->private); 44 struct intel_fbc *fbc = &dev_priv->fbc; 45 intel_wakeref_t wakeref; 46 47 if (!HAS_FBC(dev_priv)) 48 return -ENODEV; 49 50 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 51 mutex_lock(&fbc->lock); 52 53 if (intel_fbc_is_active(dev_priv)) 54 seq_puts(m, "FBC enabled\n"); 55 else 56 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); 57 58 if (intel_fbc_is_active(dev_priv)) { 59 u32 mask; 60 61 if (INTEL_GEN(dev_priv) >= 8) 62 mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK; 63 else if (INTEL_GEN(dev_priv) >= 7) 64 mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK; 65 else if (INTEL_GEN(dev_priv) >= 5) 66 mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK; 67 else if (IS_G4X(dev_priv)) 68 mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK; 69 
else 70 mask = intel_de_read(dev_priv, FBC_STATUS) & 71 (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); 72 73 seq_printf(m, "Compressing: %s\n", yesno(mask)); 74 } 75 76 mutex_unlock(&fbc->lock); 77 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 78 79 return 0; 80 } 81 82 static int i915_fbc_false_color_get(void *data, u64 *val) 83 { 84 struct drm_i915_private *dev_priv = data; 85 86 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 87 return -ENODEV; 88 89 *val = dev_priv->fbc.false_color; 90 91 return 0; 92 } 93 94 static int i915_fbc_false_color_set(void *data, u64 val) 95 { 96 struct drm_i915_private *dev_priv = data; 97 u32 reg; 98 99 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 100 return -ENODEV; 101 102 mutex_lock(&dev_priv->fbc.lock); 103 104 reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL); 105 dev_priv->fbc.false_color = val; 106 107 intel_de_write(dev_priv, ILK_DPFC_CONTROL, 108 val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR)); 109 110 mutex_unlock(&dev_priv->fbc.lock); 111 return 0; 112 } 113 114 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, 115 i915_fbc_false_color_get, i915_fbc_false_color_set, 116 "%llu\n"); 117 118 static int i915_ips_status(struct seq_file *m, void *unused) 119 { 120 struct drm_i915_private *dev_priv = node_to_i915(m->private); 121 intel_wakeref_t wakeref; 122 123 if (!HAS_IPS(dev_priv)) 124 return -ENODEV; 125 126 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 127 128 seq_printf(m, "Enabled by kernel parameter: %s\n", 129 yesno(dev_priv->params.enable_ips)); 130 131 if (INTEL_GEN(dev_priv) >= 8) { 132 seq_puts(m, "Currently: unknown\n"); 133 } else { 134 if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE) 135 seq_puts(m, "Currently: enabled\n"); 136 else 137 seq_puts(m, "Currently: disabled\n"); 138 } 139 140 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 141 142 return 0; 143 } 144 145 static int i915_sr_status(struct seq_file *m, void *unused) 146 { 147 struct drm_i915_private 
*dev_priv = node_to_i915(m->private); 148 intel_wakeref_t wakeref; 149 bool sr_enabled = false; 150 151 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 152 153 if (INTEL_GEN(dev_priv) >= 9) 154 /* no global SR status; inspect per-plane WM */; 155 else if (HAS_PCH_SPLIT(dev_priv)) 156 sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN; 157 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) || 158 IS_I945G(dev_priv) || IS_I945GM(dev_priv)) 159 sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN; 160 else if (IS_I915GM(dev_priv)) 161 sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN; 162 else if (IS_PINEVIEW(dev_priv)) 163 sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 164 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 165 sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 166 167 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); 168 169 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); 170 171 return 0; 172 } 173 174 static int i915_opregion(struct seq_file *m, void *unused) 175 { 176 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; 177 178 if (opregion->header) 179 seq_write(m, opregion->header, OPREGION_SIZE); 180 181 return 0; 182 } 183 184 static int i915_vbt(struct seq_file *m, void *unused) 185 { 186 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; 187 188 if (opregion->vbt) 189 seq_write(m, opregion->vbt, opregion->vbt_size); 190 191 return 0; 192 } 193 194 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 195 { 196 struct drm_i915_private *dev_priv = node_to_i915(m->private); 197 struct drm_device *dev = &dev_priv->drm; 198 struct intel_framebuffer *fbdev_fb = NULL; 199 struct drm_framebuffer *drm_fb; 200 201 #ifdef CONFIG_DRM_FBDEV_EMULATION 202 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) { 203 fbdev_fb = 
to_intel_framebuffer(dev_priv->fbdev->helper.fb); 204 205 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 206 fbdev_fb->base.width, 207 fbdev_fb->base.height, 208 fbdev_fb->base.format->depth, 209 fbdev_fb->base.format->cpp[0] * 8, 210 fbdev_fb->base.modifier, 211 drm_framebuffer_read_refcount(&fbdev_fb->base)); 212 i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base)); 213 seq_putc(m, '\n'); 214 } 215 #endif 216 217 mutex_lock(&dev->mode_config.fb_lock); 218 drm_for_each_fb(drm_fb, dev) { 219 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 220 if (fb == fbdev_fb) 221 continue; 222 223 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 224 fb->base.width, 225 fb->base.height, 226 fb->base.format->depth, 227 fb->base.format->cpp[0] * 8, 228 fb->base.modifier, 229 drm_framebuffer_read_refcount(&fb->base)); 230 i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base)); 231 seq_putc(m, '\n'); 232 } 233 mutex_unlock(&dev->mode_config.fb_lock); 234 235 return 0; 236 } 237 238 static int i915_psr_sink_status_show(struct seq_file *m, void *data) 239 { 240 u8 val; 241 static const char * const sink_status[] = { 242 "inactive", 243 "transition to active, capture and display", 244 "active, display from RFB", 245 "active, capture and display on sink device timings", 246 "transition to inactive, capture and display, timing re-sync", 247 "reserved", 248 "reserved", 249 "sink internal error", 250 }; 251 struct drm_connector *connector = m->private; 252 struct drm_i915_private *dev_priv = to_i915(connector->dev); 253 struct intel_dp *intel_dp = 254 intel_attached_dp(to_intel_connector(connector)); 255 int ret; 256 257 if (!CAN_PSR(dev_priv)) { 258 seq_puts(m, "PSR Unsupported\n"); 259 return -ENODEV; 260 } 261 262 if (connector->status != connector_status_connected) 263 return -ENODEV; 264 265 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val); 266 267 if (ret == 1) { 268 
const char *str = "unknown"; 269 270 val &= DP_PSR_SINK_STATE_MASK; 271 if (val < ARRAY_SIZE(sink_status)) 272 str = sink_status[val]; 273 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str); 274 } else { 275 return ret; 276 } 277 278 return 0; 279 } 280 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); 281 282 static void 283 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) 284 { 285 u32 val, status_val; 286 const char *status = "unknown"; 287 288 if (dev_priv->psr.psr2_enabled) { 289 static const char * const live_status[] = { 290 "IDLE", 291 "CAPTURE", 292 "CAPTURE_FS", 293 "SLEEP", 294 "BUFON_FW", 295 "ML_UP", 296 "SU_STANDBY", 297 "FAST_SLEEP", 298 "DEEP_SLEEP", 299 "BUF_ON", 300 "TG_ON" 301 }; 302 val = intel_de_read(dev_priv, 303 EDP_PSR2_STATUS(dev_priv->psr.transcoder)); 304 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> 305 EDP_PSR2_STATUS_STATE_SHIFT; 306 if (status_val < ARRAY_SIZE(live_status)) 307 status = live_status[status_val]; 308 } else { 309 static const char * const live_status[] = { 310 "IDLE", 311 "SRDONACK", 312 "SRDENT", 313 "BUFOFF", 314 "BUFON", 315 "AUXACK", 316 "SRDOFFACK", 317 "SRDENT_ON", 318 }; 319 val = intel_de_read(dev_priv, 320 EDP_PSR_STATUS(dev_priv->psr.transcoder)); 321 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> 322 EDP_PSR_STATUS_STATE_SHIFT; 323 if (status_val < ARRAY_SIZE(live_status)) 324 status = live_status[status_val]; 325 } 326 327 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); 328 } 329 330 static int i915_edp_psr_status(struct seq_file *m, void *data) 331 { 332 struct drm_i915_private *dev_priv = node_to_i915(m->private); 333 struct i915_psr *psr = &dev_priv->psr; 334 intel_wakeref_t wakeref; 335 const char *status; 336 bool enabled; 337 u32 val; 338 339 if (!HAS_PSR(dev_priv)) 340 return -ENODEV; 341 342 seq_printf(m, "Sink support: %s", yesno(psr->sink_support)); 343 if (psr->dp) 344 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]); 345 seq_puts(m, "\n"); 346 347 
if (!psr->sink_support) 348 return 0; 349 350 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 351 mutex_lock(&psr->lock); 352 353 if (psr->enabled) 354 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled"; 355 else 356 status = "disabled"; 357 seq_printf(m, "PSR mode: %s\n", status); 358 359 if (!psr->enabled) { 360 seq_printf(m, "PSR sink not reliable: %s\n", 361 yesno(psr->sink_not_reliable)); 362 363 goto unlock; 364 } 365 366 if (psr->psr2_enabled) { 367 val = intel_de_read(dev_priv, 368 EDP_PSR2_CTL(dev_priv->psr.transcoder)); 369 enabled = val & EDP_PSR2_ENABLE; 370 } else { 371 val = intel_de_read(dev_priv, 372 EDP_PSR_CTL(dev_priv->psr.transcoder)); 373 enabled = val & EDP_PSR_ENABLE; 374 } 375 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", 376 enableddisabled(enabled), val); 377 psr_source_status(dev_priv, m); 378 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", 379 psr->busy_frontbuffer_bits); 380 381 /* 382 * SKL+ Perf counter is reset to 0 everytime DC state is entered 383 */ 384 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 385 val = intel_de_read(dev_priv, 386 EDP_PSR_PERF_CNT(dev_priv->psr.transcoder)); 387 val &= EDP_PSR_PERF_CNT_MASK; 388 seq_printf(m, "Performance counter: %u\n", val); 389 } 390 391 if (psr->debug & I915_PSR_DEBUG_IRQ) { 392 seq_printf(m, "Last attempted entry at: %lld\n", 393 psr->last_entry_attempt); 394 seq_printf(m, "Last exit at: %lld\n", psr->last_exit); 395 } 396 397 if (psr->psr2_enabled) { 398 u32 su_frames_val[3]; 399 int frame; 400 401 /* 402 * Reading all 3 registers before hand to minimize crossing a 403 * frame boundary between register reads 404 */ 405 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { 406 val = intel_de_read(dev_priv, 407 PSR2_SU_STATUS(dev_priv->psr.transcoder, frame)); 408 su_frames_val[frame / 3] = val; 409 } 410 411 seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); 412 413 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { 414 u32 su_blocks; 415 416 su_blocks = 
su_frames_val[frame / 3] & 417 PSR2_SU_STATUS_MASK(frame); 418 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); 419 seq_printf(m, "%d\t%d\n", frame, su_blocks); 420 } 421 422 seq_printf(m, "PSR2 selective fetch: %s\n", 423 enableddisabled(psr->psr2_sel_fetch_enabled)); 424 } 425 426 unlock: 427 mutex_unlock(&psr->lock); 428 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 429 430 return 0; 431 } 432 433 static int 434 i915_edp_psr_debug_set(void *data, u64 val) 435 { 436 struct drm_i915_private *dev_priv = data; 437 intel_wakeref_t wakeref; 438 int ret; 439 440 if (!CAN_PSR(dev_priv)) 441 return -ENODEV; 442 443 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val); 444 445 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 446 447 ret = intel_psr_debug_set(dev_priv, val); 448 449 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 450 451 return ret; 452 } 453 454 static int 455 i915_edp_psr_debug_get(void *data, u64 *val) 456 { 457 struct drm_i915_private *dev_priv = data; 458 459 if (!CAN_PSR(dev_priv)) 460 return -ENODEV; 461 462 *val = READ_ONCE(dev_priv->psr.debug); 463 return 0; 464 } 465 466 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, 467 i915_edp_psr_debug_get, i915_edp_psr_debug_set, 468 "%llu\n"); 469 470 static int i915_power_domain_info(struct seq_file *m, void *unused) 471 { 472 struct drm_i915_private *dev_priv = node_to_i915(m->private); 473 struct i915_power_domains *power_domains = &dev_priv->power_domains; 474 int i; 475 476 mutex_lock(&power_domains->lock); 477 478 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 479 for (i = 0; i < power_domains->power_well_count; i++) { 480 struct i915_power_well *power_well; 481 enum intel_display_power_domain power_domain; 482 483 power_well = &power_domains->power_wells[i]; 484 seq_printf(m, "%-25s %d\n", power_well->desc->name, 485 power_well->count); 486 487 for_each_power_domain(power_domain, power_well->desc->domains) 488 seq_printf(m, " %-23s %d\n", 489 
intel_display_power_domain_str(power_domain), 490 power_domains->domain_use_count[power_domain]); 491 } 492 493 mutex_unlock(&power_domains->lock); 494 495 return 0; 496 } 497 498 static int i915_dmc_info(struct seq_file *m, void *unused) 499 { 500 struct drm_i915_private *dev_priv = node_to_i915(m->private); 501 intel_wakeref_t wakeref; 502 struct intel_csr *csr; 503 i915_reg_t dc5_reg, dc6_reg = {}; 504 505 if (!HAS_CSR(dev_priv)) 506 return -ENODEV; 507 508 csr = &dev_priv->csr; 509 510 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 511 512 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); 513 seq_printf(m, "path: %s\n", csr->fw_path); 514 515 if (!csr->dmc_payload) 516 goto out; 517 518 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 519 CSR_VERSION_MINOR(csr->version)); 520 521 if (INTEL_GEN(dev_priv) >= 12) { 522 if (IS_DGFX(dev_priv)) { 523 dc5_reg = DG1_DMC_DEBUG_DC5_COUNT; 524 } else { 525 dc5_reg = TGL_DMC_DEBUG_DC5_COUNT; 526 dc6_reg = TGL_DMC_DEBUG_DC6_COUNT; 527 } 528 529 /* 530 * NOTE: DMC_DEBUG3 is a general purpose reg. 531 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter 532 * reg for DC3CO debugging and validation, 533 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter. 534 */ 535 seq_printf(m, "DC3CO count: %d\n", 536 intel_de_read(dev_priv, DMC_DEBUG3)); 537 } else { 538 dc5_reg = IS_BROXTON(dev_priv) ? 
BXT_CSR_DC3_DC5_COUNT : 539 SKL_CSR_DC3_DC5_COUNT; 540 if (!IS_GEN9_LP(dev_priv)) 541 dc6_reg = SKL_CSR_DC5_DC6_COUNT; 542 } 543 544 seq_printf(m, "DC3 -> DC5 count: %d\n", 545 intel_de_read(dev_priv, dc5_reg)); 546 if (dc6_reg.reg) 547 seq_printf(m, "DC5 -> DC6 count: %d\n", 548 intel_de_read(dev_priv, dc6_reg)); 549 550 out: 551 seq_printf(m, "program base: 0x%08x\n", 552 intel_de_read(dev_priv, CSR_PROGRAM(0))); 553 seq_printf(m, "ssp base: 0x%08x\n", 554 intel_de_read(dev_priv, CSR_SSP_BASE)); 555 seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL)); 556 557 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 558 559 return 0; 560 } 561 562 static void intel_seq_print_mode(struct seq_file *m, int tabs, 563 const struct drm_display_mode *mode) 564 { 565 int i; 566 567 for (i = 0; i < tabs; i++) 568 seq_putc(m, '\t'); 569 570 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); 571 } 572 573 static void intel_encoder_info(struct seq_file *m, 574 struct intel_crtc *crtc, 575 struct intel_encoder *encoder) 576 { 577 struct drm_i915_private *dev_priv = node_to_i915(m->private); 578 struct drm_connector_list_iter conn_iter; 579 struct drm_connector *connector; 580 581 seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n", 582 encoder->base.base.id, encoder->base.name); 583 584 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 585 drm_for_each_connector_iter(connector, &conn_iter) { 586 const struct drm_connector_state *conn_state = 587 connector->state; 588 589 if (conn_state->best_encoder != &encoder->base) 590 continue; 591 592 seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n", 593 connector->base.id, connector->name); 594 } 595 drm_connector_list_iter_end(&conn_iter); 596 } 597 598 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 599 { 600 const struct drm_display_mode *mode = panel->fixed_mode; 601 602 seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); 603 } 604 605 static void intel_hdcp_info(struct 
seq_file *m, 606 struct intel_connector *intel_connector) 607 { 608 bool hdcp_cap, hdcp2_cap; 609 610 if (!intel_connector->hdcp.shim) { 611 seq_puts(m, "No Connector Support"); 612 goto out; 613 } 614 615 hdcp_cap = intel_hdcp_capable(intel_connector); 616 hdcp2_cap = intel_hdcp2_capable(intel_connector); 617 618 if (hdcp_cap) 619 seq_puts(m, "HDCP1.4 "); 620 if (hdcp2_cap) 621 seq_puts(m, "HDCP2.2 "); 622 623 if (!hdcp_cap && !hdcp2_cap) 624 seq_puts(m, "None"); 625 626 out: 627 seq_puts(m, "\n"); 628 } 629 630 static void intel_dp_info(struct seq_file *m, 631 struct intel_connector *intel_connector) 632 { 633 struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); 634 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 635 const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr; 636 637 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 638 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 639 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) 640 intel_panel_info(m, &intel_connector->panel); 641 642 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, 643 edid ? 
edid->data : NULL, &intel_dp->aux); 644 } 645 646 static void intel_dp_mst_info(struct seq_file *m, 647 struct intel_connector *intel_connector) 648 { 649 bool has_audio = intel_connector->port->has_audio; 650 651 seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); 652 } 653 654 static void intel_hdmi_info(struct seq_file *m, 655 struct intel_connector *intel_connector) 656 { 657 struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); 658 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder); 659 660 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); 661 } 662 663 static void intel_lvds_info(struct seq_file *m, 664 struct intel_connector *intel_connector) 665 { 666 intel_panel_info(m, &intel_connector->panel); 667 } 668 669 static void intel_connector_info(struct seq_file *m, 670 struct drm_connector *connector) 671 { 672 struct intel_connector *intel_connector = to_intel_connector(connector); 673 const struct drm_connector_state *conn_state = connector->state; 674 struct intel_encoder *encoder = 675 to_intel_encoder(conn_state->best_encoder); 676 const struct drm_display_mode *mode; 677 678 seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n", 679 connector->base.id, connector->name, 680 drm_get_connector_status_name(connector->status)); 681 682 if (connector->status == connector_status_disconnected) 683 return; 684 685 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 686 connector->display_info.width_mm, 687 connector->display_info.height_mm); 688 seq_printf(m, "\tsubpixel order: %s\n", 689 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 690 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); 691 692 if (!encoder) 693 return; 694 695 switch (connector->connector_type) { 696 case DRM_MODE_CONNECTOR_DisplayPort: 697 case DRM_MODE_CONNECTOR_eDP: 698 if (encoder->type == INTEL_OUTPUT_DP_MST) 699 intel_dp_mst_info(m, intel_connector); 700 else 701 intel_dp_info(m, 
intel_connector); 702 break; 703 case DRM_MODE_CONNECTOR_LVDS: 704 if (encoder->type == INTEL_OUTPUT_LVDS) 705 intel_lvds_info(m, intel_connector); 706 break; 707 case DRM_MODE_CONNECTOR_HDMIA: 708 if (encoder->type == INTEL_OUTPUT_HDMI || 709 encoder->type == INTEL_OUTPUT_DDI) 710 intel_hdmi_info(m, intel_connector); 711 break; 712 default: 713 break; 714 } 715 716 seq_puts(m, "\tHDCP version: "); 717 intel_hdcp_info(m, intel_connector); 718 719 seq_printf(m, "\tmodes:\n"); 720 list_for_each_entry(mode, &connector->modes, head) 721 intel_seq_print_mode(m, 2, mode); 722 } 723 724 static const char *plane_type(enum drm_plane_type type) 725 { 726 switch (type) { 727 case DRM_PLANE_TYPE_OVERLAY: 728 return "OVL"; 729 case DRM_PLANE_TYPE_PRIMARY: 730 return "PRI"; 731 case DRM_PLANE_TYPE_CURSOR: 732 return "CUR"; 733 /* 734 * Deliberately omitting default: to generate compiler warnings 735 * when a new drm_plane_type gets added. 736 */ 737 } 738 739 return "unknown"; 740 } 741 742 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation) 743 { 744 /* 745 * According to doc only one DRM_MODE_ROTATE_ is allowed but this 746 * will print them all to visualize if the values are misused 747 */ 748 snprintf(buf, bufsize, 749 "%s%s%s%s%s%s(0x%08x)", 750 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "", 751 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "", 752 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "", 753 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "", 754 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "", 755 (rotation & DRM_MODE_REFLECT_Y) ? 
"FLIPY " : "", 756 rotation); 757 } 758 759 static const char *plane_visibility(const struct intel_plane_state *plane_state) 760 { 761 if (plane_state->uapi.visible) 762 return "visible"; 763 764 if (plane_state->planar_slave) 765 return "planar-slave"; 766 767 return "hidden"; 768 } 769 770 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane) 771 { 772 const struct intel_plane_state *plane_state = 773 to_intel_plane_state(plane->base.state); 774 const struct drm_framebuffer *fb = plane_state->uapi.fb; 775 struct drm_format_name_buf format_name; 776 struct drm_rect src, dst; 777 char rot_str[48]; 778 779 src = drm_plane_state_src(&plane_state->uapi); 780 dst = drm_plane_state_dest(&plane_state->uapi); 781 782 if (fb) 783 drm_get_format_name(fb->format->format, &format_name); 784 785 plane_rotation(rot_str, sizeof(rot_str), 786 plane_state->uapi.rotation); 787 788 seq_printf(m, "\t\tuapi: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", 789 fb ? fb->base.id : 0, fb ? format_name.str : "n/a", 790 fb ? fb->modifier : 0, 791 fb ? fb->width : 0, fb ? fb->height : 0, 792 plane_visibility(plane_state), 793 DRM_RECT_FP_ARG(&src), 794 DRM_RECT_ARG(&dst), 795 rot_str); 796 797 if (plane_state->planar_linked_plane) 798 seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n", 799 plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name, 800 plane_state->planar_slave ? 
"slave" : "master"); 801 } 802 803 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane) 804 { 805 const struct intel_plane_state *plane_state = 806 to_intel_plane_state(plane->base.state); 807 const struct drm_framebuffer *fb = plane_state->hw.fb; 808 struct drm_format_name_buf format_name; 809 char rot_str[48]; 810 811 if (!fb) 812 return; 813 814 drm_get_format_name(fb->format->format, &format_name); 815 816 plane_rotation(rot_str, sizeof(rot_str), 817 plane_state->hw.rotation); 818 819 seq_printf(m, "\t\thw: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", 820 fb->base.id, format_name.str, 821 fb->modifier, fb->width, fb->height, 822 yesno(plane_state->uapi.visible), 823 DRM_RECT_FP_ARG(&plane_state->uapi.src), 824 DRM_RECT_ARG(&plane_state->uapi.dst), 825 rot_str); 826 } 827 828 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc) 829 { 830 struct drm_i915_private *dev_priv = node_to_i915(m->private); 831 struct intel_plane *plane; 832 833 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 834 seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n", 835 plane->base.base.id, plane->base.name, 836 plane_type(plane->base.type)); 837 intel_plane_uapi_info(m, plane); 838 intel_plane_hw_info(m, plane); 839 } 840 } 841 842 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) 843 { 844 const struct intel_crtc_state *crtc_state = 845 to_intel_crtc_state(crtc->base.state); 846 int num_scalers = crtc->num_scalers; 847 int i; 848 849 /* Not all platformas have a scaler */ 850 if (num_scalers) { 851 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 852 num_scalers, 853 crtc_state->scaler_state.scaler_users, 854 crtc_state->scaler_state.scaler_id); 855 856 for (i = 0; i < num_scalers; i++) { 857 const struct intel_scaler *sc = 858 &crtc_state->scaler_state.scalers[i]; 859 860 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 861 i, 
yesno(sc->in_use), sc->mode); 862 } 863 seq_puts(m, "\n"); 864 } else { 865 seq_puts(m, "\tNo scalers available on this platform\n"); 866 } 867 } 868 869 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) 870 static void crtc_updates_info(struct seq_file *m, 871 struct intel_crtc *crtc, 872 const char *hdr) 873 { 874 u64 count; 875 int row; 876 877 count = 0; 878 for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) 879 count += crtc->debug.vbl.times[row]; 880 seq_printf(m, "%sUpdates: %llu\n", hdr, count); 881 if (!count) 882 return; 883 884 for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) { 885 char columns[80] = " |"; 886 unsigned int x; 887 888 if (row & 1) { 889 const char *units; 890 891 if (row > 10) { 892 x = 1000000; 893 units = "ms"; 894 } else { 895 x = 1000; 896 units = "us"; 897 } 898 899 snprintf(columns, sizeof(columns), "%4ld%s |", 900 DIV_ROUND_CLOSEST(BIT(row + 9), x), units); 901 } 902 903 if (crtc->debug.vbl.times[row]) { 904 x = ilog2(crtc->debug.vbl.times[row]); 905 memset(columns + 8, '*', x); 906 columns[8 + x] = '\0'; 907 } 908 909 seq_printf(m, "%s%s\n", hdr, columns); 910 } 911 912 seq_printf(m, "%sMin update: %lluns\n", 913 hdr, crtc->debug.vbl.min); 914 seq_printf(m, "%sMax update: %lluns\n", 915 hdr, crtc->debug.vbl.max); 916 seq_printf(m, "%sAverage update: %lluns\n", 917 hdr, div64_u64(crtc->debug.vbl.sum, count)); 918 seq_printf(m, "%sOverruns > %uus: %u\n", 919 hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over); 920 } 921 922 static int crtc_updates_show(struct seq_file *m, void *data) 923 { 924 crtc_updates_info(m, m->private, ""); 925 return 0; 926 } 927 928 static int crtc_updates_open(struct inode *inode, struct file *file) 929 { 930 return single_open(file, crtc_updates_show, inode->i_private); 931 } 932 933 static ssize_t crtc_updates_write(struct file *file, 934 const char __user *ubuf, 935 size_t len, loff_t *offp) 936 { 937 struct seq_file *m = file->private_data; 938 struct intel_crtc *crtc = 
m->private; 939 940 /* May race with an update. Meh. */ 941 memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl)); 942 943 return len; 944 } 945 946 static const struct file_operations crtc_updates_fops = { 947 .owner = THIS_MODULE, 948 .open = crtc_updates_open, 949 .read = seq_read, 950 .llseek = seq_lseek, 951 .release = single_release, 952 .write = crtc_updates_write 953 }; 954 955 static void crtc_updates_add(struct drm_crtc *crtc) 956 { 957 debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry, 958 to_intel_crtc(crtc), &crtc_updates_fops); 959 } 960 961 #else 962 static void crtc_updates_info(struct seq_file *m, 963 struct intel_crtc *crtc, 964 const char *hdr) 965 { 966 } 967 968 static void crtc_updates_add(struct drm_crtc *crtc) 969 { 970 } 971 #endif 972 973 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) 974 { 975 struct drm_i915_private *dev_priv = node_to_i915(m->private); 976 const struct intel_crtc_state *crtc_state = 977 to_intel_crtc_state(crtc->base.state); 978 struct intel_encoder *encoder; 979 980 seq_printf(m, "[CRTC:%d:%s]:\n", 981 crtc->base.base.id, crtc->base.name); 982 983 seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n", 984 yesno(crtc_state->uapi.enable), 985 yesno(crtc_state->uapi.active), 986 DRM_MODE_ARG(&crtc_state->uapi.mode)); 987 988 if (crtc_state->hw.enable) { 989 seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n", 990 yesno(crtc_state->hw.active), 991 DRM_MODE_ARG(&crtc_state->hw.adjusted_mode)); 992 993 seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n", 994 crtc_state->pipe_src_w, crtc_state->pipe_src_h, 995 yesno(crtc_state->dither), crtc_state->pipe_bpp); 996 997 intel_scaler_info(m, crtc); 998 } 999 1000 if (crtc_state->bigjoiner) 1001 seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n", 1002 crtc_state->bigjoiner_linked_crtc->base.base.id, 1003 crtc_state->bigjoiner_linked_crtc->base.name, 1004 crtc_state->bigjoiner_slave ? 
"slave" : "master"); 1005 1006 for_each_intel_encoder_mask(&dev_priv->drm, encoder, 1007 crtc_state->uapi.encoder_mask) 1008 intel_encoder_info(m, crtc, encoder); 1009 1010 intel_plane_info(m, crtc); 1011 1012 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n", 1013 yesno(!crtc->cpu_fifo_underrun_disabled), 1014 yesno(!crtc->pch_fifo_underrun_disabled)); 1015 1016 crtc_updates_info(m, crtc, "\t"); 1017 } 1018 1019 static int i915_display_info(struct seq_file *m, void *unused) 1020 { 1021 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1022 struct drm_device *dev = &dev_priv->drm; 1023 struct intel_crtc *crtc; 1024 struct drm_connector *connector; 1025 struct drm_connector_list_iter conn_iter; 1026 intel_wakeref_t wakeref; 1027 1028 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 1029 1030 drm_modeset_lock_all(dev); 1031 1032 seq_printf(m, "CRTC info\n"); 1033 seq_printf(m, "---------\n"); 1034 for_each_intel_crtc(dev, crtc) 1035 intel_crtc_info(m, crtc); 1036 1037 seq_printf(m, "\n"); 1038 seq_printf(m, "Connector info\n"); 1039 seq_printf(m, "--------------\n"); 1040 drm_connector_list_iter_begin(dev, &conn_iter); 1041 drm_for_each_connector_iter(connector, &conn_iter) 1042 intel_connector_info(m, connector); 1043 drm_connector_list_iter_end(&conn_iter); 1044 1045 drm_modeset_unlock_all(dev); 1046 1047 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 1048 1049 return 0; 1050 } 1051 1052 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 1053 { 1054 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1055 struct drm_device *dev = &dev_priv->drm; 1056 int i; 1057 1058 drm_modeset_lock_all(dev); 1059 1060 seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n", 1061 dev_priv->dpll.ref_clks.nssc, 1062 dev_priv->dpll.ref_clks.ssc); 1063 1064 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { 1065 struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i]; 1066 1067 seq_printf(m, "DPLL%i: %s, id: 
%i\n", i, pll->info->name, 1068 pll->info->id); 1069 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", 1070 pll->state.crtc_mask, pll->active_mask, yesno(pll->on)); 1071 seq_printf(m, " tracked hardware state:\n"); 1072 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); 1073 seq_printf(m, " dpll_md: 0x%08x\n", 1074 pll->state.hw_state.dpll_md); 1075 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); 1076 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); 1077 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); 1078 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0); 1079 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1); 1080 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n", 1081 pll->state.hw_state.mg_refclkin_ctl); 1082 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n", 1083 pll->state.hw_state.mg_clktop2_coreclkctl1); 1084 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n", 1085 pll->state.hw_state.mg_clktop2_hsclkctl); 1086 seq_printf(m, " mg_pll_div0: 0x%08x\n", 1087 pll->state.hw_state.mg_pll_div0); 1088 seq_printf(m, " mg_pll_div1: 0x%08x\n", 1089 pll->state.hw_state.mg_pll_div1); 1090 seq_printf(m, " mg_pll_lf: 0x%08x\n", 1091 pll->state.hw_state.mg_pll_lf); 1092 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n", 1093 pll->state.hw_state.mg_pll_frac_lock); 1094 seq_printf(m, " mg_pll_ssc: 0x%08x\n", 1095 pll->state.hw_state.mg_pll_ssc); 1096 seq_printf(m, " mg_pll_bias: 0x%08x\n", 1097 pll->state.hw_state.mg_pll_bias); 1098 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n", 1099 pll->state.hw_state.mg_pll_tdc_coldst_bias); 1100 } 1101 drm_modeset_unlock_all(dev); 1102 1103 return 0; 1104 } 1105 1106 static int i915_ipc_status_show(struct seq_file *m, void *data) 1107 { 1108 struct drm_i915_private *dev_priv = m->private; 1109 1110 seq_printf(m, "Isochronous Priority Control: %s\n", 1111 yesno(dev_priv->ipc_enabled)); 1112 return 0; 1113 } 1114 1115 static int i915_ipc_status_open(struct inode *inode, 
struct file *file) 1116 { 1117 struct drm_i915_private *dev_priv = inode->i_private; 1118 1119 if (!HAS_IPC(dev_priv)) 1120 return -ENODEV; 1121 1122 return single_open(file, i915_ipc_status_show, dev_priv); 1123 } 1124 1125 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, 1126 size_t len, loff_t *offp) 1127 { 1128 struct seq_file *m = file->private_data; 1129 struct drm_i915_private *dev_priv = m->private; 1130 intel_wakeref_t wakeref; 1131 bool enable; 1132 int ret; 1133 1134 ret = kstrtobool_from_user(ubuf, len, &enable); 1135 if (ret < 0) 1136 return ret; 1137 1138 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { 1139 if (!dev_priv->ipc_enabled && enable) 1140 drm_info(&dev_priv->drm, 1141 "Enabling IPC: WM will be proper only after next commit\n"); 1142 dev_priv->wm.distrust_bios_wm = true; 1143 dev_priv->ipc_enabled = enable; 1144 intel_enable_ipc(dev_priv); 1145 } 1146 1147 return len; 1148 } 1149 1150 static const struct file_operations i915_ipc_status_fops = { 1151 .owner = THIS_MODULE, 1152 .open = i915_ipc_status_open, 1153 .read = seq_read, 1154 .llseek = seq_lseek, 1155 .release = single_release, 1156 .write = i915_ipc_status_write 1157 }; 1158 1159 static int i915_ddb_info(struct seq_file *m, void *unused) 1160 { 1161 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1162 struct drm_device *dev = &dev_priv->drm; 1163 struct skl_ddb_entry *entry; 1164 struct intel_crtc *crtc; 1165 1166 if (INTEL_GEN(dev_priv) < 9) 1167 return -ENODEV; 1168 1169 drm_modeset_lock_all(dev); 1170 1171 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 1172 1173 for_each_intel_crtc(&dev_priv->drm, crtc) { 1174 struct intel_crtc_state *crtc_state = 1175 to_intel_crtc_state(crtc->base.state); 1176 enum pipe pipe = crtc->pipe; 1177 enum plane_id plane_id; 1178 1179 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 1180 1181 for_each_plane_id_on_crtc(crtc, plane_id) { 1182 entry = 
&crtc_state->wm.skl.plane_ddb_y[plane_id]; 1183 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1, 1184 entry->start, entry->end, 1185 skl_ddb_entry_size(entry)); 1186 } 1187 1188 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; 1189 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 1190 entry->end, skl_ddb_entry_size(entry)); 1191 } 1192 1193 drm_modeset_unlock_all(dev); 1194 1195 return 0; 1196 } 1197 1198 static void drrs_status_per_crtc(struct seq_file *m, 1199 struct drm_device *dev, 1200 struct intel_crtc *intel_crtc) 1201 { 1202 struct drm_i915_private *dev_priv = to_i915(dev); 1203 struct i915_drrs *drrs = &dev_priv->drrs; 1204 int vrefresh = 0; 1205 struct drm_connector *connector; 1206 struct drm_connector_list_iter conn_iter; 1207 1208 drm_connector_list_iter_begin(dev, &conn_iter); 1209 drm_for_each_connector_iter(connector, &conn_iter) { 1210 bool supported = false; 1211 1212 if (connector->state->crtc != &intel_crtc->base) 1213 continue; 1214 1215 seq_printf(m, "%s:\n", connector->name); 1216 1217 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && 1218 drrs->type == SEAMLESS_DRRS_SUPPORT) 1219 supported = true; 1220 1221 seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported)); 1222 } 1223 drm_connector_list_iter_end(&conn_iter); 1224 1225 seq_puts(m, "\n"); 1226 1227 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { 1228 struct intel_panel *panel; 1229 1230 mutex_lock(&drrs->mutex); 1231 /* DRRS Supported */ 1232 seq_puts(m, "\tDRRS Enabled: Yes\n"); 1233 1234 /* disable_drrs() will make drrs->dp NULL */ 1235 if (!drrs->dp) { 1236 seq_puts(m, "Idleness DRRS: Disabled\n"); 1237 if (dev_priv->psr.enabled) 1238 seq_puts(m, 1239 "\tAs PSR is enabled, DRRS is not enabled\n"); 1240 mutex_unlock(&drrs->mutex); 1241 return; 1242 } 1243 1244 panel = &drrs->dp->attached_connector->panel; 1245 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", 1246 drrs->busy_frontbuffer_bits); 1247 1248 seq_puts(m, "\n\t\t"); 1249 if 
(drrs->refresh_rate_type == DRRS_HIGH_RR) { 1250 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); 1251 vrefresh = drm_mode_vrefresh(panel->fixed_mode); 1252 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { 1253 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); 1254 vrefresh = drm_mode_vrefresh(panel->downclock_mode); 1255 } else { 1256 seq_printf(m, "DRRS_State: Unknown(%d)\n", 1257 drrs->refresh_rate_type); 1258 mutex_unlock(&drrs->mutex); 1259 return; 1260 } 1261 seq_printf(m, "\t\tVrefresh: %d", vrefresh); 1262 1263 seq_puts(m, "\n\t\t"); 1264 mutex_unlock(&drrs->mutex); 1265 } else { 1266 /* DRRS not supported. Print the VBT parameter*/ 1267 seq_puts(m, "\tDRRS Enabled : No"); 1268 } 1269 seq_puts(m, "\n"); 1270 } 1271 1272 static int i915_drrs_status(struct seq_file *m, void *unused) 1273 { 1274 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1275 struct drm_device *dev = &dev_priv->drm; 1276 struct intel_crtc *intel_crtc; 1277 int active_crtc_cnt = 0; 1278 1279 drm_modeset_lock_all(dev); 1280 for_each_intel_crtc(dev, intel_crtc) { 1281 if (intel_crtc->base.state->active) { 1282 active_crtc_cnt++; 1283 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); 1284 1285 drrs_status_per_crtc(m, dev, intel_crtc); 1286 } 1287 } 1288 drm_modeset_unlock_all(dev); 1289 1290 if (!active_crtc_cnt) 1291 seq_puts(m, "No active crtc found\n"); 1292 1293 return 0; 1294 } 1295 1296 #define LPSP_STATUS(COND) (COND ? 
seq_puts(m, "LPSP: enabled\n") : \ 1297 seq_puts(m, "LPSP: disabled\n")) 1298 1299 static bool 1300 intel_lpsp_power_well_enabled(struct drm_i915_private *i915, 1301 enum i915_power_well_id power_well_id) 1302 { 1303 intel_wakeref_t wakeref; 1304 bool is_enabled; 1305 1306 wakeref = intel_runtime_pm_get(&i915->runtime_pm); 1307 is_enabled = intel_display_power_well_is_enabled(i915, 1308 power_well_id); 1309 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 1310 1311 return is_enabled; 1312 } 1313 1314 static int i915_lpsp_status(struct seq_file *m, void *unused) 1315 { 1316 struct drm_i915_private *i915 = node_to_i915(m->private); 1317 1318 switch (INTEL_GEN(i915)) { 1319 case 12: 1320 case 11: 1321 LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3)); 1322 break; 1323 case 10: 1324 case 9: 1325 LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2)); 1326 break; 1327 default: 1328 /* 1329 * Apart from HASWELL/BROADWELL other legacy platform doesn't 1330 * support lpsp. 
1331 */ 1332 if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 1333 LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL)); 1334 else 1335 seq_puts(m, "LPSP: not supported\n"); 1336 } 1337 1338 return 0; 1339 } 1340 1341 static int i915_dp_mst_info(struct seq_file *m, void *unused) 1342 { 1343 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1344 struct drm_device *dev = &dev_priv->drm; 1345 struct intel_encoder *intel_encoder; 1346 struct intel_digital_port *dig_port; 1347 struct drm_connector *connector; 1348 struct drm_connector_list_iter conn_iter; 1349 1350 drm_connector_list_iter_begin(dev, &conn_iter); 1351 drm_for_each_connector_iter(connector, &conn_iter) { 1352 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 1353 continue; 1354 1355 intel_encoder = intel_attached_encoder(to_intel_connector(connector)); 1356 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) 1357 continue; 1358 1359 dig_port = enc_to_dig_port(intel_encoder); 1360 if (!dig_port->dp.can_mst) 1361 continue; 1362 1363 seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n", 1364 dig_port->base.base.base.id, 1365 dig_port->base.base.name); 1366 drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr); 1367 } 1368 drm_connector_list_iter_end(&conn_iter); 1369 1370 return 0; 1371 } 1372 1373 static ssize_t i915_displayport_test_active_write(struct file *file, 1374 const char __user *ubuf, 1375 size_t len, loff_t *offp) 1376 { 1377 char *input_buffer; 1378 int status = 0; 1379 struct drm_device *dev; 1380 struct drm_connector *connector; 1381 struct drm_connector_list_iter conn_iter; 1382 struct intel_dp *intel_dp; 1383 int val = 0; 1384 1385 dev = ((struct seq_file *)file->private_data)->private; 1386 1387 if (len == 0) 1388 return 0; 1389 1390 input_buffer = memdup_user_nul(ubuf, len); 1391 if (IS_ERR(input_buffer)) 1392 return PTR_ERR(input_buffer); 1393 1394 drm_dbg(&to_i915(dev)->drm, 1395 "Copied %d bytes from user\n", (unsigned int)len); 1396 1397 
drm_connector_list_iter_begin(dev, &conn_iter); 1398 drm_for_each_connector_iter(connector, &conn_iter) { 1399 struct intel_encoder *encoder; 1400 1401 if (connector->connector_type != 1402 DRM_MODE_CONNECTOR_DisplayPort) 1403 continue; 1404 1405 encoder = to_intel_encoder(connector->encoder); 1406 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 1407 continue; 1408 1409 if (encoder && connector->status == connector_status_connected) { 1410 intel_dp = enc_to_intel_dp(encoder); 1411 status = kstrtoint(input_buffer, 10, &val); 1412 if (status < 0) 1413 break; 1414 drm_dbg(&to_i915(dev)->drm, 1415 "Got %d for test active\n", val); 1416 /* To prevent erroneous activation of the compliance 1417 * testing code, only accept an actual value of 1 here 1418 */ 1419 if (val == 1) 1420 intel_dp->compliance.test_active = true; 1421 else 1422 intel_dp->compliance.test_active = false; 1423 } 1424 } 1425 drm_connector_list_iter_end(&conn_iter); 1426 kfree(input_buffer); 1427 if (status < 0) 1428 return status; 1429 1430 *offp += len; 1431 return len; 1432 } 1433 1434 static int i915_displayport_test_active_show(struct seq_file *m, void *data) 1435 { 1436 struct drm_i915_private *dev_priv = m->private; 1437 struct drm_device *dev = &dev_priv->drm; 1438 struct drm_connector *connector; 1439 struct drm_connector_list_iter conn_iter; 1440 struct intel_dp *intel_dp; 1441 1442 drm_connector_list_iter_begin(dev, &conn_iter); 1443 drm_for_each_connector_iter(connector, &conn_iter) { 1444 struct intel_encoder *encoder; 1445 1446 if (connector->connector_type != 1447 DRM_MODE_CONNECTOR_DisplayPort) 1448 continue; 1449 1450 encoder = to_intel_encoder(connector->encoder); 1451 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 1452 continue; 1453 1454 if (encoder && connector->status == connector_status_connected) { 1455 intel_dp = enc_to_intel_dp(encoder); 1456 if (intel_dp->compliance.test_active) 1457 seq_puts(m, "1"); 1458 else 1459 seq_puts(m, "0"); 1460 } else 1461 seq_puts(m, 
"0"); 1462 } 1463 drm_connector_list_iter_end(&conn_iter); 1464 1465 return 0; 1466 } 1467 1468 static int i915_displayport_test_active_open(struct inode *inode, 1469 struct file *file) 1470 { 1471 return single_open(file, i915_displayport_test_active_show, 1472 inode->i_private); 1473 } 1474 1475 static const struct file_operations i915_displayport_test_active_fops = { 1476 .owner = THIS_MODULE, 1477 .open = i915_displayport_test_active_open, 1478 .read = seq_read, 1479 .llseek = seq_lseek, 1480 .release = single_release, 1481 .write = i915_displayport_test_active_write 1482 }; 1483 1484 static int i915_displayport_test_data_show(struct seq_file *m, void *data) 1485 { 1486 struct drm_i915_private *dev_priv = m->private; 1487 struct drm_device *dev = &dev_priv->drm; 1488 struct drm_connector *connector; 1489 struct drm_connector_list_iter conn_iter; 1490 struct intel_dp *intel_dp; 1491 1492 drm_connector_list_iter_begin(dev, &conn_iter); 1493 drm_for_each_connector_iter(connector, &conn_iter) { 1494 struct intel_encoder *encoder; 1495 1496 if (connector->connector_type != 1497 DRM_MODE_CONNECTOR_DisplayPort) 1498 continue; 1499 1500 encoder = to_intel_encoder(connector->encoder); 1501 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 1502 continue; 1503 1504 if (encoder && connector->status == connector_status_connected) { 1505 intel_dp = enc_to_intel_dp(encoder); 1506 if (intel_dp->compliance.test_type == 1507 DP_TEST_LINK_EDID_READ) 1508 seq_printf(m, "%lx", 1509 intel_dp->compliance.test_data.edid); 1510 else if (intel_dp->compliance.test_type == 1511 DP_TEST_LINK_VIDEO_PATTERN) { 1512 seq_printf(m, "hdisplay: %d\n", 1513 intel_dp->compliance.test_data.hdisplay); 1514 seq_printf(m, "vdisplay: %d\n", 1515 intel_dp->compliance.test_data.vdisplay); 1516 seq_printf(m, "bpc: %u\n", 1517 intel_dp->compliance.test_data.bpc); 1518 } else if (intel_dp->compliance.test_type == 1519 DP_TEST_LINK_PHY_TEST_PATTERN) { 1520 seq_printf(m, "pattern: %d\n", 1521 
intel_dp->compliance.test_data.phytest.phy_pattern); 1522 seq_printf(m, "Number of lanes: %d\n", 1523 intel_dp->compliance.test_data.phytest.num_lanes); 1524 seq_printf(m, "Link Rate: %d\n", 1525 intel_dp->compliance.test_data.phytest.link_rate); 1526 seq_printf(m, "level: %02x\n", 1527 intel_dp->train_set[0]); 1528 } 1529 } else 1530 seq_puts(m, "0"); 1531 } 1532 drm_connector_list_iter_end(&conn_iter); 1533 1534 return 0; 1535 } 1536 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data); 1537 1538 static int i915_displayport_test_type_show(struct seq_file *m, void *data) 1539 { 1540 struct drm_i915_private *dev_priv = m->private; 1541 struct drm_device *dev = &dev_priv->drm; 1542 struct drm_connector *connector; 1543 struct drm_connector_list_iter conn_iter; 1544 struct intel_dp *intel_dp; 1545 1546 drm_connector_list_iter_begin(dev, &conn_iter); 1547 drm_for_each_connector_iter(connector, &conn_iter) { 1548 struct intel_encoder *encoder; 1549 1550 if (connector->connector_type != 1551 DRM_MODE_CONNECTOR_DisplayPort) 1552 continue; 1553 1554 encoder = to_intel_encoder(connector->encoder); 1555 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 1556 continue; 1557 1558 if (encoder && connector->status == connector_status_connected) { 1559 intel_dp = enc_to_intel_dp(encoder); 1560 seq_printf(m, "%02lx\n", intel_dp->compliance.test_type); 1561 } else 1562 seq_puts(m, "0"); 1563 } 1564 drm_connector_list_iter_end(&conn_iter); 1565 1566 return 0; 1567 } 1568 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); 1569 1570 static void wm_latency_show(struct seq_file *m, const u16 wm[8]) 1571 { 1572 struct drm_i915_private *dev_priv = m->private; 1573 struct drm_device *dev = &dev_priv->drm; 1574 int level; 1575 int num_levels; 1576 1577 if (IS_CHERRYVIEW(dev_priv)) 1578 num_levels = 3; 1579 else if (IS_VALLEYVIEW(dev_priv)) 1580 num_levels = 1; 1581 else if (IS_G4X(dev_priv)) 1582 num_levels = 3; 1583 else 1584 num_levels = ilk_wm_max_level(dev_priv) + 1; 1585 1586 
drm_modeset_lock_all(dev); 1587 1588 for (level = 0; level < num_levels; level++) { 1589 unsigned int latency = wm[level]; 1590 1591 /* 1592 * - WM1+ latency values in 0.5us units 1593 * - latencies are in us on gen9/vlv/chv 1594 */ 1595 if (INTEL_GEN(dev_priv) >= 9 || 1596 IS_VALLEYVIEW(dev_priv) || 1597 IS_CHERRYVIEW(dev_priv) || 1598 IS_G4X(dev_priv)) 1599 latency *= 10; 1600 else if (level > 0) 1601 latency *= 5; 1602 1603 seq_printf(m, "WM%d %u (%u.%u usec)\n", 1604 level, wm[level], latency / 10, latency % 10); 1605 } 1606 1607 drm_modeset_unlock_all(dev); 1608 } 1609 1610 static int pri_wm_latency_show(struct seq_file *m, void *data) 1611 { 1612 struct drm_i915_private *dev_priv = m->private; 1613 const u16 *latencies; 1614 1615 if (INTEL_GEN(dev_priv) >= 9) 1616 latencies = dev_priv->wm.skl_latency; 1617 else 1618 latencies = dev_priv->wm.pri_latency; 1619 1620 wm_latency_show(m, latencies); 1621 1622 return 0; 1623 } 1624 1625 static int spr_wm_latency_show(struct seq_file *m, void *data) 1626 { 1627 struct drm_i915_private *dev_priv = m->private; 1628 const u16 *latencies; 1629 1630 if (INTEL_GEN(dev_priv) >= 9) 1631 latencies = dev_priv->wm.skl_latency; 1632 else 1633 latencies = dev_priv->wm.spr_latency; 1634 1635 wm_latency_show(m, latencies); 1636 1637 return 0; 1638 } 1639 1640 static int cur_wm_latency_show(struct seq_file *m, void *data) 1641 { 1642 struct drm_i915_private *dev_priv = m->private; 1643 const u16 *latencies; 1644 1645 if (INTEL_GEN(dev_priv) >= 9) 1646 latencies = dev_priv->wm.skl_latency; 1647 else 1648 latencies = dev_priv->wm.cur_latency; 1649 1650 wm_latency_show(m, latencies); 1651 1652 return 0; 1653 } 1654 1655 static int pri_wm_latency_open(struct inode *inode, struct file *file) 1656 { 1657 struct drm_i915_private *dev_priv = inode->i_private; 1658 1659 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 1660 return -ENODEV; 1661 1662 return single_open(file, pri_wm_latency_show, dev_priv); 1663 } 1664 1665 static int 
spr_wm_latency_open(struct inode *inode, struct file *file) 1666 { 1667 struct drm_i915_private *dev_priv = inode->i_private; 1668 1669 if (HAS_GMCH(dev_priv)) 1670 return -ENODEV; 1671 1672 return single_open(file, spr_wm_latency_show, dev_priv); 1673 } 1674 1675 static int cur_wm_latency_open(struct inode *inode, struct file *file) 1676 { 1677 struct drm_i915_private *dev_priv = inode->i_private; 1678 1679 if (HAS_GMCH(dev_priv)) 1680 return -ENODEV; 1681 1682 return single_open(file, cur_wm_latency_show, dev_priv); 1683 } 1684 1685 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 1686 size_t len, loff_t *offp, u16 wm[8]) 1687 { 1688 struct seq_file *m = file->private_data; 1689 struct drm_i915_private *dev_priv = m->private; 1690 struct drm_device *dev = &dev_priv->drm; 1691 u16 new[8] = { 0 }; 1692 int num_levels; 1693 int level; 1694 int ret; 1695 char tmp[32]; 1696 1697 if (IS_CHERRYVIEW(dev_priv)) 1698 num_levels = 3; 1699 else if (IS_VALLEYVIEW(dev_priv)) 1700 num_levels = 1; 1701 else if (IS_G4X(dev_priv)) 1702 num_levels = 3; 1703 else 1704 num_levels = ilk_wm_max_level(dev_priv) + 1; 1705 1706 if (len >= sizeof(tmp)) 1707 return -EINVAL; 1708 1709 if (copy_from_user(tmp, ubuf, len)) 1710 return -EFAULT; 1711 1712 tmp[len] = '\0'; 1713 1714 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 1715 &new[0], &new[1], &new[2], &new[3], 1716 &new[4], &new[5], &new[6], &new[7]); 1717 if (ret != num_levels) 1718 return -EINVAL; 1719 1720 drm_modeset_lock_all(dev); 1721 1722 for (level = 0; level < num_levels; level++) 1723 wm[level] = new[level]; 1724 1725 drm_modeset_unlock_all(dev); 1726 1727 return len; 1728 } 1729 1730 1731 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 1732 size_t len, loff_t *offp) 1733 { 1734 struct seq_file *m = file->private_data; 1735 struct drm_i915_private *dev_priv = m->private; 1736 u16 *latencies; 1737 1738 if (INTEL_GEN(dev_priv) >= 9) 1739 latencies = 
dev_priv->wm.skl_latency; 1740 else 1741 latencies = dev_priv->wm.pri_latency; 1742 1743 return wm_latency_write(file, ubuf, len, offp, latencies); 1744 } 1745 1746 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 1747 size_t len, loff_t *offp) 1748 { 1749 struct seq_file *m = file->private_data; 1750 struct drm_i915_private *dev_priv = m->private; 1751 u16 *latencies; 1752 1753 if (INTEL_GEN(dev_priv) >= 9) 1754 latencies = dev_priv->wm.skl_latency; 1755 else 1756 latencies = dev_priv->wm.spr_latency; 1757 1758 return wm_latency_write(file, ubuf, len, offp, latencies); 1759 } 1760 1761 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 1762 size_t len, loff_t *offp) 1763 { 1764 struct seq_file *m = file->private_data; 1765 struct drm_i915_private *dev_priv = m->private; 1766 u16 *latencies; 1767 1768 if (INTEL_GEN(dev_priv) >= 9) 1769 latencies = dev_priv->wm.skl_latency; 1770 else 1771 latencies = dev_priv->wm.cur_latency; 1772 1773 return wm_latency_write(file, ubuf, len, offp, latencies); 1774 } 1775 1776 static const struct file_operations i915_pri_wm_latency_fops = { 1777 .owner = THIS_MODULE, 1778 .open = pri_wm_latency_open, 1779 .read = seq_read, 1780 .llseek = seq_lseek, 1781 .release = single_release, 1782 .write = pri_wm_latency_write 1783 }; 1784 1785 static const struct file_operations i915_spr_wm_latency_fops = { 1786 .owner = THIS_MODULE, 1787 .open = spr_wm_latency_open, 1788 .read = seq_read, 1789 .llseek = seq_lseek, 1790 .release = single_release, 1791 .write = spr_wm_latency_write 1792 }; 1793 1794 static const struct file_operations i915_cur_wm_latency_fops = { 1795 .owner = THIS_MODULE, 1796 .open = cur_wm_latency_open, 1797 .read = seq_read, 1798 .llseek = seq_lseek, 1799 .release = single_release, 1800 .write = cur_wm_latency_write 1801 }; 1802 1803 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) 1804 { 1805 struct drm_i915_private *dev_priv = m->private; 1806 
struct i915_hotplug *hotplug = &dev_priv->hotplug; 1807 1808 /* Synchronize with everything first in case there's been an HPD 1809 * storm, but we haven't finished handling it in the kernel yet 1810 */ 1811 intel_synchronize_irq(dev_priv); 1812 flush_work(&dev_priv->hotplug.dig_port_work); 1813 flush_delayed_work(&dev_priv->hotplug.hotplug_work); 1814 1815 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); 1816 seq_printf(m, "Detected: %s\n", 1817 yesno(delayed_work_pending(&hotplug->reenable_work))); 1818 1819 return 0; 1820 } 1821 1822 static ssize_t i915_hpd_storm_ctl_write(struct file *file, 1823 const char __user *ubuf, size_t len, 1824 loff_t *offp) 1825 { 1826 struct seq_file *m = file->private_data; 1827 struct drm_i915_private *dev_priv = m->private; 1828 struct i915_hotplug *hotplug = &dev_priv->hotplug; 1829 unsigned int new_threshold; 1830 int i; 1831 char *newline; 1832 char tmp[16]; 1833 1834 if (len >= sizeof(tmp)) 1835 return -EINVAL; 1836 1837 if (copy_from_user(tmp, ubuf, len)) 1838 return -EFAULT; 1839 1840 tmp[len] = '\0'; 1841 1842 /* Strip newline, if any */ 1843 newline = strchr(tmp, '\n'); 1844 if (newline) 1845 *newline = '\0'; 1846 1847 if (strcmp(tmp, "reset") == 0) 1848 new_threshold = HPD_STORM_DEFAULT_THRESHOLD; 1849 else if (kstrtouint(tmp, 10, &new_threshold) != 0) 1850 return -EINVAL; 1851 1852 if (new_threshold > 0) 1853 drm_dbg_kms(&dev_priv->drm, 1854 "Setting HPD storm detection threshold to %d\n", 1855 new_threshold); 1856 else 1857 drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n"); 1858 1859 spin_lock_irq(&dev_priv->irq_lock); 1860 hotplug->hpd_storm_threshold = new_threshold; 1861 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 1862 for_each_hpd_pin(i) 1863 hotplug->stats[i].count = 0; 1864 spin_unlock_irq(&dev_priv->irq_lock); 1865 1866 /* Re-enable hpd immediately if we were in an irq storm */ 1867 flush_delayed_work(&dev_priv->hotplug.reenable_work); 1868 1869 return len; 
1870 } 1871 1872 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file) 1873 { 1874 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private); 1875 } 1876 1877 static const struct file_operations i915_hpd_storm_ctl_fops = { 1878 .owner = THIS_MODULE, 1879 .open = i915_hpd_storm_ctl_open, 1880 .read = seq_read, 1881 .llseek = seq_lseek, 1882 .release = single_release, 1883 .write = i915_hpd_storm_ctl_write 1884 }; 1885 1886 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) 1887 { 1888 struct drm_i915_private *dev_priv = m->private; 1889 1890 seq_printf(m, "Enabled: %s\n", 1891 yesno(dev_priv->hotplug.hpd_short_storm_enabled)); 1892 1893 return 0; 1894 } 1895 1896 static int 1897 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file) 1898 { 1899 return single_open(file, i915_hpd_short_storm_ctl_show, 1900 inode->i_private); 1901 } 1902 1903 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, 1904 const char __user *ubuf, 1905 size_t len, loff_t *offp) 1906 { 1907 struct seq_file *m = file->private_data; 1908 struct drm_i915_private *dev_priv = m->private; 1909 struct i915_hotplug *hotplug = &dev_priv->hotplug; 1910 char *newline; 1911 char tmp[16]; 1912 int i; 1913 bool new_state; 1914 1915 if (len >= sizeof(tmp)) 1916 return -EINVAL; 1917 1918 if (copy_from_user(tmp, ubuf, len)) 1919 return -EFAULT; 1920 1921 tmp[len] = '\0'; 1922 1923 /* Strip newline, if any */ 1924 newline = strchr(tmp, '\n'); 1925 if (newline) 1926 *newline = '\0'; 1927 1928 /* Reset to the "default" state for this system */ 1929 if (strcmp(tmp, "reset") == 0) 1930 new_state = !HAS_DP_MST(dev_priv); 1931 else if (kstrtobool(tmp, &new_state) != 0) 1932 return -EINVAL; 1933 1934 drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n", 1935 new_state ? 
"En" : "Dis"); 1936 1937 spin_lock_irq(&dev_priv->irq_lock); 1938 hotplug->hpd_short_storm_enabled = new_state; 1939 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 1940 for_each_hpd_pin(i) 1941 hotplug->stats[i].count = 0; 1942 spin_unlock_irq(&dev_priv->irq_lock); 1943 1944 /* Re-enable hpd immediately if we were in an irq storm */ 1945 flush_delayed_work(&dev_priv->hotplug.reenable_work); 1946 1947 return len; 1948 } 1949 1950 static const struct file_operations i915_hpd_short_storm_ctl_fops = { 1951 .owner = THIS_MODULE, 1952 .open = i915_hpd_short_storm_ctl_open, 1953 .read = seq_read, 1954 .llseek = seq_lseek, 1955 .release = single_release, 1956 .write = i915_hpd_short_storm_ctl_write, 1957 }; 1958 1959 static int i915_drrs_ctl_set(void *data, u64 val) 1960 { 1961 struct drm_i915_private *dev_priv = data; 1962 struct drm_device *dev = &dev_priv->drm; 1963 struct intel_crtc *crtc; 1964 1965 if (INTEL_GEN(dev_priv) < 7) 1966 return -ENODEV; 1967 1968 for_each_intel_crtc(dev, crtc) { 1969 struct drm_connector_list_iter conn_iter; 1970 struct intel_crtc_state *crtc_state; 1971 struct drm_connector *connector; 1972 struct drm_crtc_commit *commit; 1973 int ret; 1974 1975 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); 1976 if (ret) 1977 return ret; 1978 1979 crtc_state = to_intel_crtc_state(crtc->base.state); 1980 1981 if (!crtc_state->hw.active || 1982 !crtc_state->has_drrs) 1983 goto out; 1984 1985 commit = crtc_state->uapi.commit; 1986 if (commit) { 1987 ret = wait_for_completion_interruptible(&commit->hw_done); 1988 if (ret) 1989 goto out; 1990 } 1991 1992 drm_connector_list_iter_begin(dev, &conn_iter); 1993 drm_for_each_connector_iter(connector, &conn_iter) { 1994 struct intel_encoder *encoder; 1995 struct intel_dp *intel_dp; 1996 1997 if (!(crtc_state->uapi.connector_mask & 1998 drm_connector_mask(connector))) 1999 continue; 2000 2001 encoder = intel_attached_encoder(to_intel_connector(connector)); 2002 if 
(encoder->type != INTEL_OUTPUT_EDP) 2003 continue; 2004 2005 drm_dbg(&dev_priv->drm, 2006 "Manually %sabling DRRS. %llu\n", 2007 val ? "en" : "dis", val); 2008 2009 intel_dp = enc_to_intel_dp(encoder); 2010 if (val) 2011 intel_edp_drrs_enable(intel_dp, 2012 crtc_state); 2013 else 2014 intel_edp_drrs_disable(intel_dp, 2015 crtc_state); 2016 } 2017 drm_connector_list_iter_end(&conn_iter); 2018 2019 out: 2020 drm_modeset_unlock(&crtc->base.mutex); 2021 if (ret) 2022 return ret; 2023 } 2024 2025 return 0; 2026 } 2027 2028 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n"); 2029 2030 static ssize_t 2031 i915_fifo_underrun_reset_write(struct file *filp, 2032 const char __user *ubuf, 2033 size_t cnt, loff_t *ppos) 2034 { 2035 struct drm_i915_private *dev_priv = filp->private_data; 2036 struct intel_crtc *intel_crtc; 2037 struct drm_device *dev = &dev_priv->drm; 2038 int ret; 2039 bool reset; 2040 2041 ret = kstrtobool_from_user(ubuf, cnt, &reset); 2042 if (ret) 2043 return ret; 2044 2045 if (!reset) 2046 return cnt; 2047 2048 for_each_intel_crtc(dev, intel_crtc) { 2049 struct drm_crtc_commit *commit; 2050 struct intel_crtc_state *crtc_state; 2051 2052 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex); 2053 if (ret) 2054 return ret; 2055 2056 crtc_state = to_intel_crtc_state(intel_crtc->base.state); 2057 commit = crtc_state->uapi.commit; 2058 if (commit) { 2059 ret = wait_for_completion_interruptible(&commit->hw_done); 2060 if (!ret) 2061 ret = wait_for_completion_interruptible(&commit->flip_done); 2062 } 2063 2064 if (!ret && crtc_state->hw.active) { 2065 drm_dbg_kms(&dev_priv->drm, 2066 "Re-arming FIFO underruns on pipe %c\n", 2067 pipe_name(intel_crtc->pipe)); 2068 2069 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state); 2070 } 2071 2072 drm_modeset_unlock(&intel_crtc->base.mutex); 2073 2074 if (ret) 2075 return ret; 2076 } 2077 2078 ret = intel_fbc_reset_underrun(dev_priv); 2079 if (ret) 2080 return ret; 2081 2082 return 
cnt;
}

/*
 * fops for the "i915_fifo_underrun_reset" control node: write-only
 * (no .read), backed by i915_fifo_underrun_reset_write() above.
 */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};

/*
 * Read-only informational nodes, registered in bulk through
 * drm_debugfs_create_files() in intel_display_debugfs_register().
 */
static const struct drm_info_list intel_display_debugfs_list[] = {
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_lpsp_status", i915_lpsp_status, 0},
};

/*
 * Writable control nodes: each entry pairs a debugfs file name with its
 * own file_operations. Registered one by one with mode 0644 below.
 */
static const struct {
	const char *name;
	const struct file_operations *fops;
} intel_display_debugfs_files[] = {
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};

/*
 * intel_display_debugfs_register - create the display-specific debugfs
 * nodes under the DRM primary minor's debugfs root.
 *
 * The writable control files get S_IRUGO | S_IWUSR (0644) and the
 * drm_i915_private as their private data; the read-only info list is
 * handed to drm_debugfs_create_files(), which passes the minor instead.
 */
void intel_display_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
		debugfs_create_file(intel_display_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    intel_display_debugfs_files[i].fops);
	}

	drm_debugfs_create_files(intel_display_debugfs_list,
				 ARRAY_SIZE(intel_display_debugfs_list),
				 minor->debugfs_root, minor);
}

/*
 * Dump the panel power-sequencing delays of the DP/eDP sink attached to
 * this connector. Registered only for eDP connectors (see
 * intel_connector_debugfs_add() below). Returns -ENODEV if the
 * connector is not in the connected state.
 */
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		intel_attached_dp(to_intel_connector(connector));

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);

/*
 * Report the HDCP capability of the sink behind this connector; the
 * actual version/details are emitted by intel_hdcp_info(). Returns
 * -ENODEV if the connector is not connected.
 */
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);

/*
 * Print "LPSP: capable" or "LPSP: incapable" into the seq_file 'm' that
 * must be in scope at the expansion site.
 *
 * NOTE(review): COND is not parenthesized in the expansion. The current
 * callers pass expressions that bind tighter than ?: so this is safe
 * today, but ((COND) ? ...) would be the robust form — worth confirming
 * before new call sites are added.
 */
#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
			    seq_puts(m, "LPSP: incapable\n"))

/*
 * Report whether this connector can drive Low Power Single Pipe (LPSP),
 * decided per platform generation and connector type/port. Returns
 * -ENODEV if no encoder is attached or the connector is not connected.
 */
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_encoder *encoder;

	encoder = intel_attached_encoder(to_intel_connector(connector));
	if (!encoder)
		return -ENODEV;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	switch (INTEL_GEN(i915)) {
	case 12:
		/*
		 * Actually TGL can drive LPSP on port till DDI_C
		 * but there is no physically connected DDI_C on TGL sku's,
		 * and the driver is not initializing DDI_C port for gen12.
		 */
		LPSP_CAPABLE(encoder->port <= PORT_B);
		break;
	case 11:
		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
		break;
	case 10:
	case 9:
		/* Gen9/10: only port A, and only DSI/eDP/DP sinks. */
		LPSP_CAPABLE(encoder->port == PORT_A &&
			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
			      connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
			      connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
		break;
	default:
		/*
		 * Pre-gen9: only HSW/BDW eDP qualifies; on other platforms
		 * nothing is printed at all.
		 */
		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);

/*
 * Dump DSC/FEC state for the DP sink on this connector.
 *
 * Takes the connection_mutex and the CRTC's modeset lock through an
 * acquire context; on -EDEADLK it backs off and retries the whole
 * sequence (the try_again loop) per the drm_modeset_lock contract.
 * Returns -ENODEV if the connector is disconnected or has no CRTC,
 * otherwise the last lock error or 0.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Deadlock: drop everything and restart the loop. */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = intel_attached_dp(to_intel_connector(connector));
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		/* FEC is a DP-over-connector feature; eDP doesn't report it. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

/*
 * Parse a user-written boolean and latch it into force_dsc_en, which
 * i915_dsc_fec_support_show() reports back.
 *
 * NOTE(review): unlike i915_lpsp_capability_show(), this does not check
 * intel_attached_encoder() for NULL before dereferencing — confirm that
 * the connectors this file is registered on always have an encoder.
 */
static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	if (len == 0)
		return 0;

	drm_dbg(&i915->drm,
		"Copied %zu bytes from user to force DSC\n", len);

	/* Accepts the usual kstrtobool spellings: 0/1, y/n, on/off... */
	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
		(dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

/* seq_file open hook: connector pointer is stashed in inode->i_private. */
static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

/* Read/write node combining the _show and _write handlers above. */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};

/**
 * intel_connector_debugfs_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int intel_connector_debugfs_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	/* Panel power sequencing and PSR sink status only exist for eDP. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	/* HDCP applies to DP and HDMI sinks. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	/* DSC/FEC: gen10+, non-MST DP or eDP only. */
	if (INTEL_GEN(dev_priv) >= 10 &&
	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
	      !to_intel_connector(connector)->mst_port) ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	/* Legacy panels don't support LPSP on any platform. */
	if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
	     IS_BROADWELL(dev_priv)) &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
		debugfs_create_file("i915_lpsp_capability", 0444, root,
				    connector, &i915_lpsp_capability_fops);

	return 0;
}

/**
 * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
 * @crtc: pointer to a drm_crtc
 *
 * Returns 0 on success, negative error codes on error.
 *
 * Failure to add debugfs entries should generally be ignored.
 */
int intel_crtc_debugfs_add(struct drm_crtc *crtc)
{
	/* The CRTC must already have its debugfs directory registered. */
	if (!crtc->debugfs_entry)
		return -ENODEV;

	crtc_updates_add(crtc);
	return 0;
}