/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
 * when the system is idle but the display is on, as it eliminates display
 * refresh requests to DDR memory completely as long as the frame buffer for
 * that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
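
/*
 * Illustrative sketch only (the real call sites live in the frontbuffer
 * tracking code, intel_frontbuffer.c, and are not reproduced here): CPU
 * rendering to a frontbuffer is expected to be bracketed roughly like this,
 * which is what drives the software PSR exit/re-enable described above:
 *
 *	intel_psr_invalidate(i915, frontbuffer_bits, ORIGIN_CPU);
 *	... CPU writes to the frontbuffer ...
 *	intel_psr_flush(i915, frontbuffer_bits, ORIGIN_CPU);
 */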

static bool psr_global_enabled(u32 debug)
{
	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915_modparams.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	WARN_ON(crtc_state->dsc.compression_enable &&
		crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}

static void psr_irq_control(struct drm_i915_private *dev_priv)
{
	enum transcoder trans_shift;
	u32 mask, val;
	i915_reg_t imr_reg;

	/*
	 * gen12+ has registers relative to transcoder and one per transcoder
	 * using the same bit definition: handle it as TRANSCODER_EDP to force
	 * 0 shift in bit definition
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
	} else {
		trans_shift = dev_priv->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	mask = EDP_PSR_ERROR(trans_shift);
	if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= EDP_PSR_POST_EXIT(trans_shift) |
			EDP_PSR_PRE_ENTRY(trans_shift);

	/* Warning: it is masking/setting reserved bits too */
	val = I915_READ(imr_reg);
	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
	val |= ~mask;
	I915_WRITE(imr_reg, val);
}

static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
	ktime_t time_ns = ktime_get();

	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
	} else {
		trans_shift = dev_priv->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
		dev_priv->psr.last_entry_attempt = time_ns;
		DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
			      transcoder_name(cpu_transcoder));
	}

	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
		dev_priv->psr.last_exit = time_ns;
		DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
			      transcoder_name(cpu_transcoder));

		if (INTEL_GEN(dev_priv) >= 9) {
			u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
			bool psr2_enabled = dev_priv->psr.psr2_enabled;

			I915_WRITE(PSR_EVENT(cpu_transcoder), val);
			psr_event_print(val, psr2_enabled);
		}
	}

	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
		u32 val;

		DRM_WARN("[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		dev_priv->psr.irq_aux_error = true;

		/*
		 * If this interrupt is not masked it will keep firing so fast
		 * that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again, so
		 * there is no need to unmask the interrupt or clear
		 * irq_aux_error.
		 */
		val = I915_READ(imr_reg);
		val |= EDP_PSR_ERROR(trans_shift);
		I915_WRITE(imr_reg, val);

		schedule_work(&dev_priv->psr.work);
	}
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
	u16 val;
	ssize_t r;

	/*
	 * Return the default X granularity if granularity is not required or
	 * if the DPCD read fails.
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	if (dev_priv->psr.dp) {
		DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
		return;
	}

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is able to do selective updates
		 * without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granulartiy(intel_dp);
		}
	}
}

static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct dp_sdp psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base,
					crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (dev_priv->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/* sink_sync_latency of 8 means source has to wait for more than 8
	 * frames, we'll go with 9 frames for now
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);

	I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
	if (INTEL_GEN(dev_priv) < 9)
		return false;
	else if (INTEL_GEN(dev_priv) >= 12)
		return trans == TRANSCODER_A;
	else
		return trans == TRANSCODER_EDP;
}

static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
	if (!cstate || !cstate->hw.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}

static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
				     u32 idle_frames)
{
	u32 val;

	idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
	val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
	val |= idle_frames;
	I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
{
	psr2_program_idle_frames(dev_priv, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
{
	int idle_frames;

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	/*
	 * Restore the PSR2 idle frame count; let's use 6 as the minimum to
	 * cover all known cases including the off-by-one issue that HW has
	 * in some cases.
	 */
	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	psr2_program_idle_frames(dev_priv, idle_frames);
}

static void tgl_dc5_idle_thread(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.idle_work.work);

	mutex_lock(&dev_priv->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&dev_priv->psr.idle_work))
		goto unlock;

	DRM_DEBUG_KMS("DC5/6 idle thread\n");
	tgl_psr2_disable_dc3co(dev_priv);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->psr.dc3co_enabled)
		return;

	cancel_delayed_work(&dev_priv->psr.idle_work);
	/* Before PSR2 exit disallow dc3co */
	tgl_psr2_disable_dc3co(dev_priv);
}
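
/*
 * DC3CO summary (descriptive note only, restating the helpers above): while
 * PSR2 is active and the screen is only being updated by flips, the idle
 * frame count is programmed to 0 and DC3CO is requested as the target DC
 * state; once the delayed idle work runs (meaning the display has been
 * idle), or before a PSR2 exit, DC3CO is disallowed again and the idle
 * frame count is restored so DC5/DC6 can be used.
 */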

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
			      transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable) {
		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n",
			      crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate the SU block width is a multiple of
	 * x granularity.
	 */
	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
		return false;
	}

	if (crtc_state->crc_enabled) {
		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder,
	 * but for now the driver only supports one instance of PSR, so let's
	 * keep it hardcoded to PORT_A.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
		WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);

	WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/* Only HSW and BDW have PSR AUX registers that need to be set up.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 chicken = I915_READ(reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(reg, chicken);
	}

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD; also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other hw tracking issues now that
	 * we can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);

	psr_irq_control(dev_priv);
}

static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;
	u32 val;

	WARN_ON(dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
	dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
	dev_priv->psr.transcoder = crtc_state->cpu_transcoder;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling in this situation causes the screen to freeze the first
	 * time that PSR HW tries to activate, so let's keep PSR disabled
	 * to avoid any rendering problems.
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
		val &= EDP_PSR_ERROR(0);
	} else {
		val = I915_READ(EDP_PSR_IIR);
		val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
	}
	if (val) {
		dev_priv->psr.sink_not_reliable = true;
		DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
		return;
	}

	DRM_DEBUG_KMS("Enabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}
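
/*
 * Call-flow sketch (an assumption for orientation, not taken from this
 * file): the encoder enable path is expected to train the link first and
 * only then call intel_psr_enable(), roughly:
 *
 *	encoder enable hook -> link training -> pipe/port enabled
 *		-> intel_psr_enable(intel_dp, crtc_state);
 *
 * intel_psr_disable() is the counterpart and has to run before the pipe
 * is shut down, as noted in the kernel-docs below.
 */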

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
		return;

	dev_priv->psr.force_mode_changed = false;

	if (!crtc_state->has_psr)
		return;

	WARN_ON(dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);

	if (!psr_global_enabled(dev_priv->psr.debug)) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		goto unlock;
	}

	intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active) {
		if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
			val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
			WARN_ON(val & EDP_PSR2_ENABLE);
		}

		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
		WARN_ON(val & EDP_PSR_ENABLE);

		return;
	}

	if (dev_priv->psr.psr2_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(dev_priv);
		val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		val &= ~EDP_PSR2_ENABLE;
		I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
	} else {
		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
		WARN_ON(!(val & EDP_PSR_ENABLE));
		val &= ~EDP_PSR_ENABLE;
		I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
	}
	dev_priv->psr.active = false;
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	DRM_DEBUG_KMS("Disabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(dev_priv);

	if (dev_priv->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		DRM_ERROR("Timed out waiting PSR idle state\n");

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	if (dev_priv->psr.psr2_enabled)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

	dev_priv->psr.enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
	cancel_delayed_work_sync(&dev_priv->psr.idle_work);
}

static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the current active
		 * pipe.
		 */
		I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
	else
		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit
		 * PSR on older gens, so do the manual exit instead.
		 */
		intel_psr_exit(dev_priv);
}

/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update PSR states, disabling, enabling or switching PSR
 * version when executing fastsets. For full modeset, intel_psr_disable() and
 * intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
		return;

	dev_priv->psr.force_mode_changed = false;

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(dev_priv);
		else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
			/*
			 * Activate PSR again after a force exit when enabling
			 * CRC in older gens
			 */
			if (!dev_priv->psr.active &&
			    !dev_priv->psr.busy_frontbuffer_bits)
				schedule_work(&dev_priv->psr.work);
		}

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
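
/*
 * Usage sketch, restating the kernel-doc above (the exact call sites are in
 * the atomic commit code and are not reproduced here): a fastset updates PSR
 * in place, while a full modeset brackets the change with disable/enable:
 *
 *	fastset:      intel_psr_update(intel_dp, new_crtc_state);
 *	full modeset: intel_psr_disable(intel_dp, old_crtc_state);
 *	              ... modeset ...
 *	              intel_psr_enable(intel_dp, new_crtc_state);
 */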

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEOUT if PSR status does not idle.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
			    u32 *out_value)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
		return 0;

	/* FIXME: Update this for PSR2 if we need to wait for idle */
	if (READ_ONCE(dev_priv->psr.psr2_enabled))
		return 0;

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */

	return __intel_wait_for_register(&dev_priv->uncore,
					 EDP_PSR_STATUS(dev_priv->psr.transcoder),
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}

static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!dev_priv->psr.enabled)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct intel_crtc *crtc;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}

		if (crtc_state->hw.active && crtc_state->has_psr) {
			/* Mark mode as changed to trigger a pipe->update() */
			crtc_state->uapi.mode_changed = true;
			break;
		}
	}

	err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
	if (ret)
		return ret;

	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	dev_priv->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (dev_priv->psr.enabled)
		psr_irq_control(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
{
	struct i915_psr *psr = &dev_priv->psr;

	intel_psr_disable_locked(psr->dp);
	psr->sink_not_reliable = true;
	/* let's make sure that sink is awake */
	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	if (READ_ONCE(dev_priv->psr.irq_aux_error))
		intel_psr_handle_irq(dev_priv);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}
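
/*
 * Note on ORIGIN_FLIP, summarizing the behaviour implemented in
 * intel_psr_invalidate() above and intel_psr_flush() below: page flips never
 * go through the invalidate path (it returns early for ORIGIN_FLIP), and in
 * the flush path they are routed to the DC3CO handling in tgl_dc3co_flush()
 * instead of the force-exit + re-activation path used by CPU/GTT frontbuffer
 * writes.
 */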

/*
 * Once we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will also invalidate and flush PSR for ORIGIN_FLIP
 * events, so tgl_dc3co_flush() will need to be changed accordingly.
 */
static void
tgl_dc3co_flush(struct drm_i915_private *dev_priv,
		unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	u32 delay;

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.dc3co_enabled)
		goto unlock;

	if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
		goto unlock;

	/*
	 * Every frontbuffer flush caused by a flip event modifies the delay
	 * of the delayed work; when that work finally runs it means the
	 * display has been idle.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
		goto unlock;

	tgl_psr2_enable_dc3co(dev_priv);
	/* DC5/DC6 required idle frames = 6 */
	delay = 6 * dev_priv->psr.dc3co_exit_delay;
	mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
			 usecs_to_jiffies(delay));

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP) {
		tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		psr_force_hw_tracking_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	if (!dev_priv->psr.sink_support)
		return;

	if (IS_HASWELL(dev_priv))
		/*
		 * HSW doesn't have PSR registers in the same space as the
		 * transcoder, so set this to a value that, when subtracted
		 * from the register in transcoder space, results in the
		 * right offset for HSW.
		 */
		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;

	if (i915_modparams.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			i915_modparams.enable_psr = 0;

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else if (INTEL_GEN(dev_priv) < 12)
		/* For new platforms up to TGL let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
	mutex_init(&dev_priv->psr.lock);
}

static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
	if (ret != 1)
		return ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
	if (ret != 1)
		return ret;

	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}

static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_dp_aux *aux = &intel_dp->aux;
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	int r;

	if (!psr->psr2_enabled)
		return;

	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
	if (r != 1) {
		DRM_ERROR("Error reading ALPM status\n");
		return;
	}

	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n");

		/* Clearing error */
		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
	}
}

static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		DRM_ERROR("Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n");

		/* Clearing it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}

void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled || psr->dp != intel_dp)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		DRM_ERROR("Error reading PSR status or error status\n");
		goto exit;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  error_status & ~errors);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	psr_alpm_check(intel_dp);
	psr_capability_changed_check(intel_dp);

exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool ret;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return false;

	mutex_lock(&dev_priv->psr.lock);
	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
	mutex_unlock(&dev_priv->psr.lock);

	return ret;
}

void intel_psr_atomic_check(struct drm_connector *connector,
			    struct drm_connector_state *old_state,
			    struct drm_connector_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector;
	struct intel_digital_port *dig_port;
	struct drm_crtc_state *crtc_state;

	if (!CAN_PSR(dev_priv) || !new_state->crtc ||
	    !dev_priv->psr.force_mode_changed)
		return;

	intel_connector = to_intel_connector(connector);
	dig_port = enc_to_dig_port(intel_connector->encoder);
	if (dev_priv->psr.dp != &dig_port->dp)
		return;

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	crtc_state->mode_changed = true;
}

void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv;

	if (!intel_dp)
		return;

	dev_priv = dp_to_i915(intel_dp);
	if (!CAN_PSR(dev_priv) || intel_dp != dev_priv->psr.dp)
		return;

	dev_priv->psr.force_mode_changed = true;
}