// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "i915_debugfs.h"
#include "intel_csr.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_fbc.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sideband.h"

/* Resolve a debugfs node back to the i915 device it was registered for. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

/* Dump the frontbuffer-tracking busy and flip bitmasks. */
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

/*
 * Report whether framebuffer compression (FBC) is active and, when it is,
 * the hardware's compression status bits. The status register and mask
 * differ per hardware generation.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	/* Keep the device awake while reading FBC status registers. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* Pick the per-generation compression status register/mask. */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = intel_de_read(dev_priv, FBC_STATUS) &
				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);

		/* Any non-zero bit means the hardware is compressing. */
		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

/* debugfs i915_fbc_false_color read: return the cached false-color setting. */
static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

/* debugfs i915_fbc_false_color write: toggle FBC false-color (gen7+ only). */
static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	/* Read-modify-write: only the FALSE_COLOR bit changes. */
	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");

/* Report the IPS kernel-parameter setting and, where readable, HW state. */
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		/* Gen8+: this file does not read the HW state back. */
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

/*
 * Report whether self-refresh is enabled. The enable bit lives in a
 * different register on each pre-gen9 platform; gen9+ has no global bit.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t
wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

/* Dump the raw ACPI OpRegion header, if present. */
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	return 0;
}

/* Dump the raw Video BIOS Table (VBT), if present. */
static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}

/* List the fbdev framebuffer (if any) followed by all user framebuffers. */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		/* Skip the fbcon framebuffer already printed above. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

/* Read the sink's PSR state over DPCD and print it in human-readable form. */
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp =
		intel_attached_dp(to_intel_connector(connector));
	int ret;

	if (!CAN_PSR(dev_priv)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* drm_dp_dpcd_readb() returns the number of bytes read on success. */
	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val <
ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		return ret;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);

/* Decode the source-side PSR1/PSR2 hardware state field into a name. */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		/* PSR2 state machine names, indexed by the status field. */
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = intel_de_read(dev_priv,
				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		/* PSR1 state machine names, indexed by the status field. */
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = intel_de_read(dev_priv,
				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}

/*
 * Full PSR status dump: sink capability, source control/state, perf
 * counter, IRQ timestamps and (for PSR2) selective-update block counts.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	if (!psr->sink_support)
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   yesno(psr->sink_not_reliable));

		goto unlock;
	}

	/* Read the enable bit from the PSR1 or PSR2 control register. */
	if (psr->psr2_enabled) {
		val = intel_de_read(dev_priv,
				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(dev_priv->psr.transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = intel_de_read(dev_priv,
				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
		val &= EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
			val = intel_de_read(dev_priv,
					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
			su_frames_val[frame / 3] = val;
		}

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			/* Each register packs the SU count of 3 frames. */
			su_blocks = su_frames_val[frame / 3] &
PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

/* debugfs i915_edp_psr_debug write: apply a new PSR debug mask. */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);

	/* Keep the device awake while the debug value is applied. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	ret = intel_psr_debug_set(dev_priv, val);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return ret;
}

/* debugfs i915_edp_psr_debug read: return the current PSR debug mask. */
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	/* Lockless read; READ_ONCE avoids a torn/cached value. */
	*val = READ_ONCE(dev_priv->psr.debug);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");

/* Dump use counts for every power well and each domain it serves. */
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, " %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}

/* DMC/CSR firmware info: load state, version, DC-state counters, base regs. */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;
	i915_reg_t dc5_reg, dc6_reg = {};

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload only the base registers are printed. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (INTEL_GEN(dev_priv) >= 12) {
		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		/*
		 * NOTE: DMC_DEBUG3 is a general purpose reg.
		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
		 * reg for DC3CO debugging and validation,
		 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
		 */
		seq_printf(m, "DC3CO count: %d\n",
			   intel_de_read(dev_priv, DMC_DEBUG3));
	} else {
		dc5_reg = IS_BROXTON(dev_priv) ?
BXT_CSR_DC3_DC5_COUNT :
			  SKL_CSR_DC3_DC5_COUNT;
		if (!IS_GEN9_LP(dev_priv))
			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   intel_de_read(dev_priv, dc5_reg));
	/* dc6_reg stays zero-initialized on platforms without a DC6 counter. */
	if (dc6_reg.reg)
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   intel_de_read(dev_priv, dc6_reg));

out:
	seq_printf(m, "program base: 0x%08x\n",
		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n",
		   intel_de_read(dev_priv, CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

/* Print a display mode, indented by 'tabs' tab stops. */
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 const struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}

/* List the connectors whose state currently points at this encoder. */
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *crtc,
			       struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
		   encoder->base.base.id, encoder->base.name);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		const struct drm_connector_state *conn_state =
			connector->state;

		if (conn_state->best_encoder != &encoder->base)
			continue;

		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
			   connector->base.id, connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);
}

/* Print the panel's fixed mode. */
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	const struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}

/* Report which HDCP versions (1.4 / 2.2) the connector is capable of. */
static void intel_hdcp_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	bool hdcp_cap, hdcp2_cap;

	hdcp_cap = intel_hdcp_capable(intel_connector);
	hdcp2_cap = intel_hdcp2_capable(intel_connector);

	if (hdcp_cap)
		seq_puts(m, "HDCP1.4 ");
	if (hdcp2_cap)
		seq_puts(m, "HDCP2.2 ");

	if (!hdcp_cap && !hdcp2_cap)
		seq_puts(m, "None");

	seq_puts(m, "\n");
}

/* DP connector details: DPCD rev, audio, panel mode (eDP), downstream, HDCP. */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
	if (intel_connector->hdcp.shim) {
		seq_puts(m, "\tHDCP version: ");
		intel_hdcp_info(m, intel_connector);
	}
}

/* DP MST connector details: audio capability of the MST port. */
static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(intel_encoder);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}

/* HDMI connector details: audio support and HDCP capability. */
static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
	if (intel_connector->hdcp.shim) {
		seq_puts(m, "\tHDCP version: ");
		intel_hdcp_info(m, intel_connector);
	}
}

/* LVDS connector details: just the fixed panel mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}

/* Print everything known about one connector, dispatching by type. */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	const struct drm_connector_state *conn_state = connector->state;
	struct intel_encoder *encoder =
		to_intel_encoder(conn_state->best_encoder);
	const struct drm_display_mode *mode;

	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	/* Type-specific details require an attached encoder. */
	if (!encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (encoder->type == INTEL_OUTPUT_HDMI ||
		    encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
intel_seq_print_mode(m, 2, mode);
}

/* Short three-letter tag for a DRM plane type. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}

/* Format a rotation bitmask into buf; prints every set bit plus raw hex. */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}

/* Print the plane's uapi state (the state requested by userspace). */
static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
{
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	const struct drm_framebuffer *fb = plane_state->uapi.fb;
	struct drm_format_name_buf format_name;
	struct drm_rect src, dst;
	char rot_str[48];

	src = drm_plane_state_src(&plane_state->uapi);
	dst = drm_plane_state_dest(&plane_state->uapi);

	if (fb)
		drm_get_format_name(fb->format->format, &format_name);

	plane_rotation(rot_str, sizeof(rot_str),
		       plane_state->uapi.rotation);

	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
		   fb ? fb->width : 0, fb ? fb->height : 0,
		   DRM_RECT_FP_ARG(&src),
		   DRM_RECT_ARG(&dst),
		   rot_str);
}

/* Print the plane's hw state; nothing to show when no fb is attached. */
static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
{
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;
	char rot_str[48];

	if (!fb)
		return;

	drm_get_format_name(fb->format->format, &format_name);

	plane_rotation(rot_str, sizeof(rot_str),
		       plane_state->hw.rotation);

	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
		   fb->base.id, format_name.str,
		   fb->width, fb->height,
		   yesno(plane_state->uapi.visible),
		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
		   DRM_RECT_ARG(&plane_state->uapi.dst),
		   rot_str);
}

/* Dump uapi and hw state for every plane attached to the crtc. */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
			   plane->base.base.id, plane->base.name,
			   plane_type(plane->base.type));
		intel_plane_uapi_info(m, plane);
		intel_plane_hw_info(m, plane);
	}
}

/* Dump pipe scaler usage for the crtc. */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	int num_scalers = crtc->num_scalers;
	int i;

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   crtc_state->scaler_state.scaler_users,
			   crtc_state->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			const struct intel_scaler *sc =
				&crtc_state->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}

/* Dump uapi/hw state of one crtc, then its encoders, planes and underruns. */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_encoder *encoder;

	seq_printf(m, "[CRTC:%d:%s]:\n",
		   crtc->base.base.id, crtc->base.name);

	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
		   yesno(crtc_state->uapi.enable),
		   yesno(crtc_state->uapi.active),
		   DRM_MODE_ARG(&crtc_state->uapi.mode));

	if (crtc_state->hw.enable) {
		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
			   yesno(crtc_state->hw.active),
			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));

		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
			   yesno(crtc_state->dither), crtc_state->pipe_bpp);

		intel_scaler_info(m, crtc);
	}

	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
				    crtc_state->uapi.encoder_mask)
		intel_encoder_info(m, crtc, encoder);

	intel_plane_info(m, crtc);

	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
		   yesno(!crtc->cpu_fifo_underrun_disabled),
		   yesno(!crtc->pch_fifo_underrun_disabled));
}

/* Top-level display state dump: all crtcs, then all connectors. */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	/* Snapshot a consistent modeset state while dumping. */
	drm_modeset_lock_all(dev);

	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev,
crtc)
		intel_crtc_info(m, crtc);

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);

	drm_modeset_unlock_all(dev);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

/* Dump reference clocks and tracked hardware state of every shared DPLL. */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);

	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
		   dev_priv->dpll.ref_clks.nssc,
		   dev_priv->dpll.ref_clks.ssc);

	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0: 0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1: 0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf: 0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc: 0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

/* Show whether Isochronous Priority Control (IPC) is enabled. */
static int i915_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
		   yesno(dev_priv->ipc_enabled));
	return 0;
}

/* Open hook: reject platforms without IPC support. */
static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}

/* Write hook: parse a boolean from userspace and enable/disable IPC. */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			drm_info(&dev_priv->drm,
				 "Enabling IPC: WM will be proper only after next commit\n");
		/* Force watermark recomputation on the next commit. */
		dev_priv->wm.distrust_bios_wm = true;
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}

static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open =
i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};

/* Dump the per-pipe DDB allocation entries (start/end/size), gen9+ only. */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
			seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}

/* Report the DRRS (dynamic refresh rate switching) state for one crtc. */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name the connectors attached to this crtc. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	seq_puts(m, "\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
					 "\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter */
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}

/* Walk all crtcs and report DRRS status for each active one. */
static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}

/* Dump the MST topology below every MST-capable DP source port. */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip MST branch connectors; we want the source ports. */
		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(intel_encoder);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
			   intel_dig_port->base.base.base.id,
			   intel_dig_port->base.base.name);
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user
						  *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy and NUL-terminate the user buffer in one shot */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	drm_dbg(&to_i915(dev)->drm,
		"Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			drm_dbg(&to_i915(dev)->drm,
				"Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = true;
			else
				intel_dp->compliance.test_active = false;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}

/* debugfs show: print "1"/"0" per connected DP connector depending on
 * whether compliance testing is armed on it.
 */
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST stream encoders don't carry compliance state */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};

/* debugfs show: dump the pending DP compliance-test payload (EDID hash
 * or requested video pattern) for each connected DP connector.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);

/* debugfs show: print the pending DP compliance-test type per connected
 * DP connector (hex), or "0" when disconnected.
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);

/* Print a watermark latency table. The number of levels is platform
 * dependent; raw values are converted to usec for display (see the
 * unit comment in the loop body).
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}

/* debugfs show: primary-plane watermark latencies (skl table on gen9+) */
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

/* debugfs show: sprite-plane watermark latencies (skl table on gen9+) */
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

/* debugfs show: cursor-plane watermark latencies (skl table on gen9+) */
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

/* Primary WM latencies exist on gen5+ and G4X only */
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5 &&
	    !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

/* Sprite WM latencies are not exposed on GMCH platforms */
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

/* Cursor WM latencies are not exposed on GMCH platforms */
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

/* Common write path for the three latency files: parse exactly
 * num_levels space-separated u16 values from userspace and store them
 * into @wm under the global modeset lock. Returns -EINVAL when the
 * input doesn't match the expected level count.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}


/* debugfs write: update primary-plane WM latencies */
static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

/* debugfs write: update sprite-plane WM latencies */
static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

/* debugfs write: update cursor-plane WM latencies */
static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

/* debugfs show: report the HPD storm threshold and whether a storm is
 * currently being handled (reenable work pending).
 */
static int
i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

/* debugfs write: set the HPD storm detection threshold. Accepts a
 * decimal number, or "reset" for the default; 0 disables detection.
 * Also clears per-pin stats so the new threshold starts clean.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};

/* debugfs show: report whether short-pulse HPD storm detection is on */
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

/* debugfs write: enable/disable short-pulse HPD storm detection.
 * Accepts a boolean, or "reset" which restores the platform default
 * (enabled only when DP MST is not supported). Stats are cleared so a
 * storm isn't falsely triggered by stale counts.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
		    new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};

/* debugfs setter: manually force eDP DRRS on (val != 0) or off per
 * crtc. Each crtc is locked individually, and any pending commit's
 * hw_done is awaited first so we don't race the commit path. Gen7+
 * only.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->hw.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Only connectors actually on this crtc */
			if (!(crtc_state->uapi.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(to_intel_connector(connector));
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			drm_dbg(&dev_priv->drm,
				"Manually %sabling DRRS. %llu\n",
				val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(encoder);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");

/* debugfs write: on a truthy input, re-arm FIFO underrun reporting on
 * every active pipe (waiting for any pending commit to finish first)
 * and then reset FBC underrun tracking.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->hw.active) {
			drm_dbg_kms(&dev_priv->drm,
				    "Re-arming FIFO underruns on pipe %c\n",
				    pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return
cnt;
}

static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};

/* Read-only debugfs entries registered via drm_debugfs_create_files() */
static const struct drm_info_list intel_display_debugfs_list[] = {
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
};

/* Writable debugfs entries with custom file_operations */
static const struct {
	const char *name;
	const struct file_operations *fops;
} intel_display_debugfs_files[] = {
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};

/* Register all display-related debugfs files on the primary minor */
int intel_display_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
		debugfs_create_file(intel_display_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    intel_display_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(intel_display_debugfs_list,
					ARRAY_SIZE(intel_display_debugfs_list),
					minor->debugfs_root, minor);
}

/* Per-connector debugfs show: eDP panel power sequencing and backlight
 * delays. Only meaningful while the panel is connected.
 */
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		intel_attached_dp(to_intel_connector(connector));

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);

/* Per-connector debugfs show: report the sink's HDCP capability via
 * intel_hdcp_info(). Requires a connected sink and an HDCP shim.
 */
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP is supported by connector */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);

/* Per-connector debugfs show: DSC/FEC state for a DP/eDP connector.
 * Uses a drm_modeset_acquire_ctx with explicit -EDEADLK backoff to take
 * the connection mutex and the crtc lock without deadlocking.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Contended: back off and retry the whole sequence */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = intel_attached_dp(to_intel_connector(connector));
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

/* Per-connector debugfs write: parse a boolean and set force_dsc_en so
 * the next modeset forces DSC on this DP connector.
 */
static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder
		= intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	if (len == 0)
		return 0;

	drm_dbg(&i915->drm,
		"Copied %zu bytes from user to force DSC\n", len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
		(dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};

/**
 * intel_connector_debugfs_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int intel_connector_debugfs_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	/* eDP-only: panel timings and PSR sink status */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	/* HDCP capability applies to DP and HDMI connectors */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	/* DSC/FEC control: gen10+ DP/eDP only */
	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}