/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_psr.h"
#include "intel_sprite.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
 * when the system is idle but the display is on, as it eliminates display
 * refresh requests to DDR memory completely as long as the frame buffer for
 * that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
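 *
 * The PSR state machine in this file is protected by dev_priv->psr.lock; the
 * debug interface handled by intel_psr_debug_set() can additionally force
 * PSR1 or disable PSR entirely at runtime.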
 */

static bool psr_global_enabled(u32 debug)
{
	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915_modparams.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	WARN_ON(crtc_state->dsc_params.compression_enable &&
		crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}

static int edp_psr_shift(enum transcoder cpu_transcoder)
{
	switch (cpu_transcoder) {
	case TRANSCODER_A:
		return EDP_PSR_TRANSCODER_A_SHIFT;
	case TRANSCODER_B:
		return EDP_PSR_TRANSCODER_B_SHIFT;
	case TRANSCODER_C:
		return EDP_PSR_TRANSCODER_C_SHIFT;
	default:
		MISSING_CASE(cpu_transcoder);
		/* fallthrough */
	case TRANSCODER_EDP:
		return EDP_PSR_TRANSCODER_EDP_SHIFT;
	}
}

void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
{
	u32 debug_mask, mask;
	enum transcoder cpu_transcoder;
	u32 transcoders = BIT(TRANSCODER_EDP);

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	debug_mask = 0;
	mask = 0;
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		int shift = edp_psr_shift(cpu_transcoder);

		mask |= EDP_PSR_ERROR(shift);
		debug_mask |= EDP_PSR_POST_EXIT(shift) |
			      EDP_PSR_PRE_ENTRY(shift);
	}

	if (debug & I915_PSR_DEBUG_IRQ)
		mask |= debug_mask;

	I915_WRITE(EDP_PSR_IMR, ~mask);
}

static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}

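/*
 * Called from the display interrupt code with the PSR bits of the IIR.
 * Records the last PSR entry attempt and exit timestamps, and on an AUX
 * error masks the error interrupt and schedules the PSR work to handle it.
 */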
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	u32 transcoders = BIT(TRANSCODER_EDP);
	enum transcoder cpu_transcoder;
	ktime_t time_ns = ktime_get();
	u32 mask = 0;

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		int shift = edp_psr_shift(cpu_transcoder);

		if (psr_iir & EDP_PSR_ERROR(shift)) {
			DRM_WARN("[transcoder %s] PSR aux error\n",
				 transcoder_name(cpu_transcoder));

			dev_priv->psr.irq_aux_error = true;

			/*
			 * If this interrupt is not masked it will keep
			 * firing so fast that it prevents the scheduled
			 * work from running.
			 * Also, after a PSR error we don't want to arm PSR
			 * again, so we don't care about unmasking the
			 * interrupt or clearing irq_aux_error.
			 */
			mask |= EDP_PSR_ERROR(shift);
		}

		if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
			dev_priv->psr.last_entry_attempt = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
				      transcoder_name(cpu_transcoder));
		}

		if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
			dev_priv->psr.last_exit = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
				      transcoder_name(cpu_transcoder));

			if (INTEL_GEN(dev_priv) >= 9) {
				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
				bool psr2_enabled = dev_priv->psr.psr2_enabled;

				I915_WRITE(PSR_EVENT(cpu_transcoder), val);
				psr_event_print(val, psr2_enabled);
			}
		}
	}

	if (mask) {
		mask |= I915_READ(EDP_PSR_IMR);
		I915_WRITE(EDP_PSR_IMR, mask);

		schedule_work(&dev_priv->psr.work);
	}
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
	u16 val;
	ssize_t r;

	/*
	 * Return the default X granularity if the panel doesn't require one
	 * or if the DPCD read fails.
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

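/*
 * Read and cache the sink PSR capabilities from the DPCD: this sets
 * dev_priv->psr.sink_support and, when the panel advertises PSR2 with
 * Y-coordinate support plus ALPM, sink_psr2_support together with the
 * colorimetry and SU X granularity information.
 */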
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	WARN_ON(dev_priv->psr.dp);
	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
		 * are only sure that it is going to be used when required by
		 * the panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granulartiy(intel_dp);
		}
	}
}

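/*
 * Write the VSC SDP used with PSR: the PSR2 header (with or without
 * colorimetry) per eDP 1.4 table 6-11, or the PSR1 header per eDP 1.3
 * table 3-10.
 */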
"" : "not "); 331 332 if (dev_priv->psr.sink_psr2_support) { 333 dev_priv->psr.colorimetry_support = 334 intel_dp_get_colorimetry_status(intel_dp); 335 dev_priv->psr.su_x_granularity = 336 intel_dp_get_su_x_granulartiy(intel_dp); 337 } 338 } 339 } 340 341 static void intel_psr_setup_vsc(struct intel_dp *intel_dp, 342 const struct intel_crtc_state *crtc_state) 343 { 344 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 345 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 346 struct dp_sdp psr_vsc; 347 348 if (dev_priv->psr.psr2_enabled) { 349 /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */ 350 memset(&psr_vsc, 0, sizeof(psr_vsc)); 351 psr_vsc.sdp_header.HB0 = 0; 352 psr_vsc.sdp_header.HB1 = 0x7; 353 if (dev_priv->psr.colorimetry_support) { 354 psr_vsc.sdp_header.HB2 = 0x5; 355 psr_vsc.sdp_header.HB3 = 0x13; 356 } else { 357 psr_vsc.sdp_header.HB2 = 0x4; 358 psr_vsc.sdp_header.HB3 = 0xe; 359 } 360 } else { 361 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ 362 memset(&psr_vsc, 0, sizeof(psr_vsc)); 363 psr_vsc.sdp_header.HB0 = 0; 364 psr_vsc.sdp_header.HB1 = 0x7; 365 psr_vsc.sdp_header.HB2 = 0x2; 366 psr_vsc.sdp_header.HB3 = 0x8; 367 } 368 369 intel_dig_port->write_infoframe(&intel_dig_port->base, 370 crtc_state, 371 DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc)); 372 } 373 374 static void hsw_psr_setup_aux(struct intel_dp *intel_dp) 375 { 376 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 377 u32 aux_clock_divider, aux_ctl; 378 int i; 379 static const u8 aux_msg[] = { 380 [0] = DP_AUX_NATIVE_WRITE << 4, 381 [1] = DP_SET_POWER >> 8, 382 [2] = DP_SET_POWER & 0xff, 383 [3] = 1 - 1, 384 [4] = DP_SET_POWER_D0, 385 }; 386 u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK | 387 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK | 388 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK | 389 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK; 390 391 BUILD_BUG_ON(sizeof(aux_msg) > 20); 392 for (i = 0; i < sizeof(aux_msg); i += 4) 393 I915_WRITE(EDP_PSR_AUX_DATA(i >> 2), 394 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); 395 396 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); 397 398 /* Start with bits set for DDI_AUX_CTL register */ 399 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), 400 aux_clock_divider); 401 402 /* Select only valid bits for SRD_AUX_CTL */ 403 aux_ctl &= psr_aux_mask; 404 I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl); 405 } 406 407 static void intel_psr_enable_sink(struct intel_dp *intel_dp) 408 { 409 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 410 u8 dpcd_val = DP_PSR_ENABLE; 411 412 /* Enable ALPM at sink for psr2 */ 413 if (dev_priv->psr.psr2_enabled) { 414 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 415 DP_ALPM_ENABLE); 416 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; 417 } else { 418 if (dev_priv->psr.link_standby) 419 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; 420 421 if (INTEL_GEN(dev_priv) >= 8) 422 dpcd_val |= DP_PSR_CRC_VERIFICATION; 423 } 424 425 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); 426 427 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); 428 } 429 430 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) 431 { 432 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 433 u32 val = 0; 434 435 if (INTEL_GEN(dev_priv) >= 11) 436 val |= EDP_PSR_TP4_TIME_0US; 437 438 if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0) 439 val |= EDP_PSR_TP1_TIME_0us; 440 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100) 441 val |= EDP_PSR_TP1_TIME_100us; 442 
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/* sink_sync_latency of 8 means source has to wait for more than 8
	 * frames, we'll go with 9 frames for now
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	/*
	 * The PSR2 HW incorrectly uses EDP_PSR_TP1_TP3_SEL and the BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	I915_WRITE(EDP_PSR_CTL, 0);

	I915_WRITE(EDP_PSR2_CTL, val);
}

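/*
 * Check whether PSR2 can be used with this CRTC state: the sink must support
 * it, DSC must be disabled, the mode must fit within the platform PSR2
 * limits, hdisplay must be a multiple of the SU X granularity and pipe CRC
 * must not be in use.
 */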
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	if (!dev_priv->psr.sink_psr2_support)
		return false;

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate the SU block width is a multiple of
	 * x granularity.
	 */
	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
		return false;
	}

	if (crtc_state->crc_enabled) {
		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	return true;
}

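/**
 * intel_psr_compute_config - Compute PSR configuration
 * @intel_dp: Intel DP
 * @crtc_state: CRTC state being computed
 *
 * Validates the PSR constraints (port A only, no interlacing, sane PSR setup
 * time, reliable sink) and sets crtc_state->has_psr and has_psr2 accordingly.
 */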
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	/*
	 * The HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with a DDI implementation of PSR have per-transcoder
	 * PSR registers and we only implement the transcoder EDP ones.
	 * Since by display design transcoder EDP is tied to port A, we can
	 * safely bail out based on port A.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (INTEL_GEN(dev_priv) >= 9)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
					 enum transcoder cpu_transcoder)
{
	static const i915_reg_t regs[] = {
		[TRANSCODER_A] = CHICKEN_TRANS_A,
		[TRANSCODER_B] = CHICKEN_TRANS_B,
		[TRANSCODER_C] = CHICKEN_TRANS_C,
		[TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
	};

	WARN_ON(INTEL_GEN(dev_priv) < 9);

	if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
		    !regs[cpu_transcoder].reg))
		cpu_transcoder = TRANSCODER_A;

	return regs[cpu_transcoder];
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/* Only HSW and BDW have PSR AUX registers that need to be set up.
	 * SKL+ use hardcoded values for the PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
							cpu_transcoder);
		u32 chicken = I915_READ(reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(reg, chicken);
	}

	/*
	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD.
	 * Also mask LPSP to avoid a dependency on other drivers that might
	 * block runtime_pm besides preventing other hw tracking issues now
	 * that we can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	I915_WRITE(EDP_PSR_DEBUG, mask);
}

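/*
 * Enable PSR on both sink and source and kick off the first activation.
 * Called with dev_priv->psr.lock held, from intel_psr_enable() and from
 * intel_psr_update() on fastsets.
 */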
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;

	WARN_ON(dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;

	DRM_DEBUG_KMS("Enabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);

	if (!psr_global_enabled(dev_priv->psr.debug)) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		goto unlock;
	}

	intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active) {
		if (INTEL_GEN(dev_priv) >= 9)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
		return;
	}

	if (dev_priv->psr.psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
	} else {
		val = I915_READ(EDP_PSR_CTL);
		WARN_ON(!(val & EDP_PSR_ENABLE));
		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
	}
	dev_priv->psr.active = false;
}

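/*
 * Disable PSR: deactivate it on the source, wait for the hardware to report
 * idle and then disable PSR on the sink. Caller must hold dev_priv->psr.lock.
 */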
"2" : "1"); 816 817 intel_psr_exit(dev_priv); 818 819 if (dev_priv->psr.psr2_enabled) { 820 psr_status = EDP_PSR2_STATUS; 821 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; 822 } else { 823 psr_status = EDP_PSR_STATUS; 824 psr_status_mask = EDP_PSR_STATUS_STATE_MASK; 825 } 826 827 /* Wait till PSR is idle */ 828 if (intel_wait_for_register(&dev_priv->uncore, 829 psr_status, psr_status_mask, 0, 2000)) 830 DRM_ERROR("Timed out waiting PSR idle state\n"); 831 832 /* Disable PSR on Sink */ 833 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); 834 835 dev_priv->psr.enabled = false; 836 } 837 838 /** 839 * intel_psr_disable - Disable PSR 840 * @intel_dp: Intel DP 841 * @old_crtc_state: old CRTC state 842 * 843 * This function needs to be called before disabling pipe. 844 */ 845 void intel_psr_disable(struct intel_dp *intel_dp, 846 const struct intel_crtc_state *old_crtc_state) 847 { 848 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 849 850 if (!old_crtc_state->has_psr) 851 return; 852 853 if (WARN_ON(!CAN_PSR(dev_priv))) 854 return; 855 856 mutex_lock(&dev_priv->psr.lock); 857 858 intel_psr_disable_locked(intel_dp); 859 860 mutex_unlock(&dev_priv->psr.lock); 861 cancel_work_sync(&dev_priv->psr.work); 862 } 863 864 static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) 865 { 866 if (INTEL_GEN(dev_priv) >= 9) 867 /* 868 * Display WA #0884: skl+ 869 * This documented WA for bxt can be safely applied 870 * broadly so we can force HW tracking to exit PSR 871 * instead of disabling and re-enabling. 872 * Workaround tells us to write 0 to CUR_SURFLIVE_A, 873 * but it makes more sense write to the current active 874 * pipe. 875 */ 876 I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0); 877 else 878 /* 879 * A write to CURSURFLIVE do not cause HW tracking to exit PSR 880 * on older gens so doing the manual exit instead. 881 */ 882 intel_psr_exit(dev_priv); 883 } 884 885 /** 886 * intel_psr_update - Update PSR state 887 * @intel_dp: Intel DP 888 * @crtc_state: new CRTC state 889 * 890 * This functions will update PSR states, disabling, enabling or switching PSR 891 * version when executing fastsets. For full modeset, intel_psr_disable() and 892 * intel_psr_enable() should be called instead. 
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
		return;

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(dev_priv);
		else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
			/*
			 * Activate PSR again after a force exit when enabling
			 * CRC in older gens
			 */
			if (!dev_priv->psr.active &&
			    !dev_priv->psr.busy_frontbuffer_bits)
				schedule_work(&dev_priv->psr.work);
		}

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
			    u32 *out_value)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
		return 0;

	/* FIXME: Update this for PSR2 if we need to wait for idle */
	if (READ_ONCE(dev_priv->psr.psr2_enabled))
		return 0;

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */

	return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}

static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!dev_priv->psr.enabled)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS;
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS;
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

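/*
 * Force a fastset on the PSR-capable CRTC so that a change of the PSR debug
 * mode through intel_psr_debug_set() is applied: mark the active CRTC with
 * has_psr as mode_changed and commit it, backing off and retrying on modeset
 * deadlocks.
 */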
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;
		struct intel_crtc_state *intel_crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}

		intel_crtc_state = to_intel_crtc_state(crtc_state);

		if (crtc_state->active && intel_crtc_state->has_psr) {
			/* Mark mode as changed to trigger a pipe->update() */
			crtc_state->mode_changed = true;
			break;
		}
	}

	err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
	if (ret)
		return ret;

	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	dev_priv->psr.debug = val;
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	mutex_unlock(&dev_priv->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
{
	struct i915_psr *psr = &dev_priv->psr;

	intel_psr_disable_locked(psr->dp);
	psr->sink_not_reliable = true;
	/* let's make sure that the sink is awake */
	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	if (READ_ONCE(dev_priv->psr.irq_aux_error))
		intel_psr_handle_irq(dev_priv);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		psr_force_hw_tracking_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	if (i915_modparams.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			i915_modparams.enable_psr = 0;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the
	 * first time that the PSR HW tries to activate, so let's keep PSR
	 * disabled to avoid any rendering problems.
	 */
	val = I915_READ(EDP_PSR_IIR);
	val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
	if (val) {
		DRM_DEBUG_KMS("PSR interruption error set\n");
		dev_priv->psr.sink_not_reliable = true;
	}

	/* Set link_standby vs. link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}

void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled || psr->dp != intel_dp)
		goto exit;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_STATUS dpcd read failed\n");
		goto exit;
	}

	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
		goto exit;
	}

	if (val & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (val & DP_PSR_LINK_CRC_ERROR)
		DRM_ERROR("PSR Link CRC error, disabling PSR\n");

	if (val & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  val & ~errors);
	if (val & errors) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool ret;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return false;

	mutex_lock(&dev_priv->psr.lock);
	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
	mutex_unlock(&dev_priv->psr.lock);

	return ret;
}