/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * completely eliminates display refresh requests to DDR memory as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP AUX message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses software
 * frontbuffer tracking to make sure it doesn't miss a screen update. For this
 * integration intel_psr_invalidate() and intel_psr_flush() get called by the
 * frontbuffer tracking code. Note that because of locking issues the
 * self-refresh re-enable code is done from a work queue, which must be
 * correctly synchronized/cancelled when shutting down the pipe.
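 *
 * A rough sketch of the expected flow from the frontbuffer tracking code
 * (illustrative only, not a literal call chain):
 *
 *	intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
 *	... frontbuffer rendering ...
 *	intel_psr_flush(dev_priv, frontbuffer_bits, origin);
 *
 * intel_psr_flush() then schedules dev_priv->psr.work, which re-activates
 * PSR once the hardware reports idle again.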
62 */ 63 64 static bool psr_global_enabled(u32 debug) 65 { 66 switch (debug & I915_PSR_DEBUG_MODE_MASK) { 67 case I915_PSR_DEBUG_DEFAULT: 68 return i915_modparams.enable_psr; 69 case I915_PSR_DEBUG_DISABLE: 70 return false; 71 default: 72 return true; 73 } 74 } 75 76 static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, 77 const struct intel_crtc_state *crtc_state) 78 { 79 /* Cannot enable DSC and PSR2 simultaneously */ 80 WARN_ON(crtc_state->dsc.compression_enable && 81 crtc_state->has_psr2); 82 83 switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { 84 case I915_PSR_DEBUG_DISABLE: 85 case I915_PSR_DEBUG_FORCE_PSR1: 86 return false; 87 default: 88 return crtc_state->has_psr2; 89 } 90 } 91 92 static void psr_irq_control(struct drm_i915_private *dev_priv) 93 { 94 enum transcoder trans_shift; 95 u32 mask, val; 96 i915_reg_t imr_reg; 97 98 /* 99 * gen12+ has registers relative to transcoder and one per transcoder 100 * using the same bit definition: handle it as TRANSCODER_EDP to force 101 * 0 shift in bit definition 102 */ 103 if (INTEL_GEN(dev_priv) >= 12) { 104 trans_shift = 0; 105 imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder); 106 } else { 107 trans_shift = dev_priv->psr.transcoder; 108 imr_reg = EDP_PSR_IMR; 109 } 110 111 mask = EDP_PSR_ERROR(trans_shift); 112 if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ) 113 mask |= EDP_PSR_POST_EXIT(trans_shift) | 114 EDP_PSR_PRE_ENTRY(trans_shift); 115 116 /* Warning: it is masking/setting reserved bits too */ 117 val = I915_READ(imr_reg); 118 val &= ~EDP_PSR_TRANS_MASK(trans_shift); 119 val |= ~mask; 120 I915_WRITE(imr_reg, val); 121 } 122 123 static void psr_event_print(u32 val, bool psr2_enabled) 124 { 125 DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val); 126 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE) 127 DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n"); 128 if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled) 129 DRM_DEBUG_KMS("\tPSR2 disabled\n"); 130 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN) 131 DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n"); 132 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN) 133 DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n"); 134 if (val & PSR_EVENT_GRAPHICS_RESET) 135 DRM_DEBUG_KMS("\tGraphics reset\n"); 136 if (val & PSR_EVENT_PCH_INTERRUPT) 137 DRM_DEBUG_KMS("\tPCH interrupt\n"); 138 if (val & PSR_EVENT_MEMORY_UP) 139 DRM_DEBUG_KMS("\tMemory up\n"); 140 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY) 141 DRM_DEBUG_KMS("\tFront buffer modification\n"); 142 if (val & PSR_EVENT_WD_TIMER_EXPIRE) 143 DRM_DEBUG_KMS("\tPSR watchdog timer expired\n"); 144 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE) 145 DRM_DEBUG_KMS("\tPIPE registers updated\n"); 146 if (val & PSR_EVENT_REGISTER_UPDATE) 147 DRM_DEBUG_KMS("\tRegister updated\n"); 148 if (val & PSR_EVENT_HDCP_ENABLE) 149 DRM_DEBUG_KMS("\tHDCP enabled\n"); 150 if (val & PSR_EVENT_KVMR_SESSION_ENABLE) 151 DRM_DEBUG_KMS("\tKVMR session enabled\n"); 152 if (val & PSR_EVENT_VBI_ENABLE) 153 DRM_DEBUG_KMS("\tVBI enabled\n"); 154 if (val & PSR_EVENT_LPSP_MODE_EXIT) 155 DRM_DEBUG_KMS("\tLPSP mode exited\n"); 156 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled) 157 DRM_DEBUG_KMS("\tPSR disabled\n"); 158 } 159 160 void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) 161 { 162 enum transcoder cpu_transcoder = dev_priv->psr.transcoder; 163 enum transcoder trans_shift; 164 i915_reg_t imr_reg; 165 ktime_t time_ns = ktime_get(); 166 167 if (INTEL_GEN(dev_priv) >= 12) { 168 trans_shift = 0; 169 imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder); 170 } else { 171 trans_shift = 
dev_priv->psr.transcoder; 172 imr_reg = EDP_PSR_IMR; 173 } 174 175 if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) { 176 dev_priv->psr.last_entry_attempt = time_ns; 177 DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n", 178 transcoder_name(cpu_transcoder)); 179 } 180 181 if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) { 182 dev_priv->psr.last_exit = time_ns; 183 DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n", 184 transcoder_name(cpu_transcoder)); 185 186 if (INTEL_GEN(dev_priv) >= 9) { 187 u32 val = I915_READ(PSR_EVENT(cpu_transcoder)); 188 bool psr2_enabled = dev_priv->psr.psr2_enabled; 189 190 I915_WRITE(PSR_EVENT(cpu_transcoder), val); 191 psr_event_print(val, psr2_enabled); 192 } 193 } 194 195 if (psr_iir & EDP_PSR_ERROR(trans_shift)) { 196 u32 val; 197 198 DRM_WARN("[transcoder %s] PSR aux error\n", 199 transcoder_name(cpu_transcoder)); 200 201 dev_priv->psr.irq_aux_error = true; 202 203 /* 204 * If this interruption is not masked it will keep 205 * interrupting so fast that it prevents the scheduled 206 * work to run. 207 * Also after a PSR error, we don't want to arm PSR 208 * again so we don't care about unmask the interruption 209 * or unset irq_aux_error. 210 */ 211 val = I915_READ(imr_reg); 212 val |= EDP_PSR_ERROR(trans_shift); 213 I915_WRITE(imr_reg, val); 214 215 schedule_work(&dev_priv->psr.work); 216 } 217 } 218 219 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp) 220 { 221 u8 alpm_caps = 0; 222 223 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, 224 &alpm_caps) != 1) 225 return false; 226 return alpm_caps & DP_ALPM_CAP; 227 } 228 229 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp) 230 { 231 u8 val = 8; /* assume the worst if we can't read the value */ 232 233 if (drm_dp_dpcd_readb(&intel_dp->aux, 234 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1) 235 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK; 236 else 237 DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n"); 238 return val; 239 } 240 241 static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp) 242 { 243 u16 val; 244 ssize_t r; 245 246 /* 247 * Returning the default X granularity if granularity not required or 248 * if DPCD read fails 249 */ 250 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) 251 return 4; 252 253 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2); 254 if (r != 2) 255 DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n"); 256 257 /* 258 * Spec says that if the value read is 0 the default granularity should 259 * be used instead. 
 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	if (dev_priv->psr.dp) {
		DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
		return;
	}

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an AUX frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ?
"" : "not "); 321 322 if (dev_priv->psr.sink_psr2_support) { 323 dev_priv->psr.colorimetry_support = 324 intel_dp_get_colorimetry_status(intel_dp); 325 dev_priv->psr.su_x_granularity = 326 intel_dp_get_su_x_granulartiy(intel_dp); 327 } 328 } 329 } 330 331 static void intel_psr_setup_vsc(struct intel_dp *intel_dp, 332 const struct intel_crtc_state *crtc_state) 333 { 334 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 335 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 336 struct dp_sdp psr_vsc; 337 338 if (dev_priv->psr.psr2_enabled) { 339 /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */ 340 memset(&psr_vsc, 0, sizeof(psr_vsc)); 341 psr_vsc.sdp_header.HB0 = 0; 342 psr_vsc.sdp_header.HB1 = 0x7; 343 if (dev_priv->psr.colorimetry_support) { 344 psr_vsc.sdp_header.HB2 = 0x5; 345 psr_vsc.sdp_header.HB3 = 0x13; 346 } else { 347 psr_vsc.sdp_header.HB2 = 0x4; 348 psr_vsc.sdp_header.HB3 = 0xe; 349 } 350 } else { 351 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ 352 memset(&psr_vsc, 0, sizeof(psr_vsc)); 353 psr_vsc.sdp_header.HB0 = 0; 354 psr_vsc.sdp_header.HB1 = 0x7; 355 psr_vsc.sdp_header.HB2 = 0x2; 356 psr_vsc.sdp_header.HB3 = 0x8; 357 } 358 359 intel_dig_port->write_infoframe(&intel_dig_port->base, 360 crtc_state, 361 DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc)); 362 } 363 364 static void hsw_psr_setup_aux(struct intel_dp *intel_dp) 365 { 366 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 367 u32 aux_clock_divider, aux_ctl; 368 int i; 369 static const u8 aux_msg[] = { 370 [0] = DP_AUX_NATIVE_WRITE << 4, 371 [1] = DP_SET_POWER >> 8, 372 [2] = DP_SET_POWER & 0xff, 373 [3] = 1 - 1, 374 [4] = DP_SET_POWER_D0, 375 }; 376 u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK | 377 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK | 378 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK | 379 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK; 380 381 BUILD_BUG_ON(sizeof(aux_msg) > 20); 382 for (i = 0; i < sizeof(aux_msg); i += 4) 383 I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2), 384 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); 385 386 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); 387 388 /* Start with bits set for DDI_AUX_CTL register */ 389 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), 390 aux_clock_divider); 391 392 /* Select only valid bits for SRD_AUX_CTL */ 393 aux_ctl &= psr_aux_mask; 394 I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl); 395 } 396 397 static void intel_psr_enable_sink(struct intel_dp *intel_dp) 398 { 399 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 400 u8 dpcd_val = DP_PSR_ENABLE; 401 402 /* Enable ALPM at sink for psr2 */ 403 if (dev_priv->psr.psr2_enabled) { 404 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 405 DP_ALPM_ENABLE | 406 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); 407 408 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; 409 } else { 410 if (dev_priv->psr.link_standby) 411 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; 412 413 if (INTEL_GEN(dev_priv) >= 8) 414 dpcd_val |= DP_PSR_CRC_VERIFICATION; 415 } 416 417 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); 418 419 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); 420 } 421 422 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) 423 { 424 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 425 u32 val = 0; 426 427 if (INTEL_GEN(dev_priv) >= 11) 428 val |= EDP_PSR_TP4_TIME_0US; 429 430 if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0) 431 val |= EDP_PSR_TP1_TIME_0us; 
432 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100) 433 val |= EDP_PSR_TP1_TIME_100us; 434 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500) 435 val |= EDP_PSR_TP1_TIME_500us; 436 else 437 val |= EDP_PSR_TP1_TIME_2500us; 438 439 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0) 440 val |= EDP_PSR_TP2_TP3_TIME_0us; 441 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100) 442 val |= EDP_PSR_TP2_TP3_TIME_100us; 443 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500) 444 val |= EDP_PSR_TP2_TP3_TIME_500us; 445 else 446 val |= EDP_PSR_TP2_TP3_TIME_2500us; 447 448 if (intel_dp_source_supports_hbr2(intel_dp) && 449 drm_dp_tps3_supported(intel_dp->dpcd)) 450 val |= EDP_PSR_TP1_TP3_SEL; 451 else 452 val |= EDP_PSR_TP1_TP2_SEL; 453 454 return val; 455 } 456 457 static void hsw_activate_psr1(struct intel_dp *intel_dp) 458 { 459 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 460 u32 max_sleep_time = 0x1f; 461 u32 val = EDP_PSR_ENABLE; 462 463 /* Let's use 6 as the minimum to cover all known cases including the 464 * off-by-one issue that HW has in some cases. 465 */ 466 int idle_frames = max(6, dev_priv->vbt.psr.idle_frames); 467 468 /* sink_sync_latency of 8 means source has to wait for more than 8 469 * frames, we'll go with 9 frames for now 470 */ 471 idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); 472 val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; 473 474 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; 475 if (IS_HASWELL(dev_priv)) 476 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; 477 478 if (dev_priv->psr.link_standby) 479 val |= EDP_PSR_LINK_STANDBY; 480 481 val |= intel_psr1_get_tp_time(intel_dp); 482 483 if (INTEL_GEN(dev_priv) >= 8) 484 val |= EDP_PSR_CRC_ENABLE; 485 486 val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & 487 EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK); 488 I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val); 489 } 490 491 static void hsw_activate_psr2(struct intel_dp *intel_dp) 492 { 493 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 494 u32 val; 495 496 /* Let's use 6 as the minimum to cover all known cases including the 497 * off-by-one issue that HW has in some cases. 498 */ 499 int idle_frames = max(6, dev_priv->vbt.psr.idle_frames); 500 501 idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); 502 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT; 503 504 val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE; 505 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 506 val |= EDP_Y_COORDINATE_ENABLE; 507 508 val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1); 509 510 if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && 511 dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50) 512 val |= EDP_PSR2_TP2_TIME_50us; 513 else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100) 514 val |= EDP_PSR2_TP2_TIME_100us; 515 else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500) 516 val |= EDP_PSR2_TP2_TIME_500us; 517 else 518 val |= EDP_PSR2_TP2_TIME_2500us; 519 520 /* 521 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is 522 * recommending keep this bit unset while PSR2 is enabled. 
523 */ 524 I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0); 525 526 I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); 527 } 528 529 static bool 530 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) 531 { 532 if (INTEL_GEN(dev_priv) < 9) 533 return false; 534 else if (INTEL_GEN(dev_priv) >= 12) 535 return trans == TRANSCODER_A; 536 else 537 return trans == TRANSCODER_EDP; 538 } 539 540 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate) 541 { 542 if (!cstate || !cstate->hw.active) 543 return 0; 544 545 return DIV_ROUND_UP(1000 * 1000, 546 drm_mode_vrefresh(&cstate->hw.adjusted_mode)); 547 } 548 549 static void psr2_program_idle_frames(struct drm_i915_private *dev_priv, 550 u32 idle_frames) 551 { 552 u32 val; 553 554 idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT; 555 val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); 556 val &= ~EDP_PSR2_IDLE_FRAME_MASK; 557 val |= idle_frames; 558 I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); 559 } 560 561 static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv) 562 { 563 psr2_program_idle_frames(dev_priv, 0); 564 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO); 565 } 566 567 static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv) 568 { 569 int idle_frames; 570 571 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 572 /* 573 * Restore PSR2 idle frame let's use 6 as the minimum to cover all known 574 * cases including the off-by-one issue that HW has in some cases. 575 */ 576 idle_frames = max(6, dev_priv->vbt.psr.idle_frames); 577 idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); 578 psr2_program_idle_frames(dev_priv, idle_frames); 579 } 580 581 static void tgl_dc5_idle_thread(struct work_struct *work) 582 { 583 struct drm_i915_private *dev_priv = 584 container_of(work, typeof(*dev_priv), psr.idle_work.work); 585 586 mutex_lock(&dev_priv->psr.lock); 587 /* If delayed work is pending, it is not idle */ 588 if (delayed_work_pending(&dev_priv->psr.idle_work)) 589 goto unlock; 590 591 DRM_DEBUG_KMS("DC5/6 idle thread\n"); 592 tgl_psr2_disable_dc3co(dev_priv); 593 unlock: 594 mutex_unlock(&dev_priv->psr.lock); 595 } 596 597 static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv) 598 { 599 if (!dev_priv->psr.dc3co_enabled) 600 return; 601 602 cancel_delayed_work(&dev_priv->psr.idle_work); 603 /* Before PSR2 exit disallow dc3co*/ 604 tgl_psr2_disable_dc3co(dev_priv); 605 } 606 607 static bool intel_psr2_config_valid(struct intel_dp *intel_dp, 608 struct intel_crtc_state *crtc_state) 609 { 610 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 611 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; 612 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; 613 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0; 614 615 if (!dev_priv->psr.sink_psr2_support) 616 return false; 617 618 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) { 619 DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n", 620 transcoder_name(crtc_state->cpu_transcoder)); 621 return false; 622 } 623 624 /* 625 * DSC and PSR2 cannot be enabled simultaneously. If a requested 626 * resolution requires DSC to be enabled, priority is given to DSC 627 * over PSR2. 
628 */ 629 if (crtc_state->dsc.compression_enable) { 630 DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n"); 631 return false; 632 } 633 634 if (INTEL_GEN(dev_priv) >= 12) { 635 psr_max_h = 5120; 636 psr_max_v = 3200; 637 max_bpp = 30; 638 } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { 639 psr_max_h = 4096; 640 psr_max_v = 2304; 641 max_bpp = 24; 642 } else if (IS_GEN(dev_priv, 9)) { 643 psr_max_h = 3640; 644 psr_max_v = 2304; 645 max_bpp = 24; 646 } 647 648 if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) { 649 DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", 650 crtc_hdisplay, crtc_vdisplay, 651 psr_max_h, psr_max_v); 652 return false; 653 } 654 655 if (crtc_state->pipe_bpp > max_bpp) { 656 DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n", 657 crtc_state->pipe_bpp, max_bpp); 658 return false; 659 } 660 661 /* 662 * HW sends SU blocks of size four scan lines, which means the starting 663 * X coordinate and Y granularity requirements will always be met. We 664 * only need to validate the SU block width is a multiple of 665 * x granularity. 666 */ 667 if (crtc_hdisplay % dev_priv->psr.su_x_granularity) { 668 DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n", 669 crtc_hdisplay, dev_priv->psr.su_x_granularity); 670 return false; 671 } 672 673 if (crtc_state->crc_enabled) { 674 DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n"); 675 return false; 676 } 677 678 return true; 679 } 680 681 void intel_psr_compute_config(struct intel_dp *intel_dp, 682 struct intel_crtc_state *crtc_state) 683 { 684 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 685 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 686 const struct drm_display_mode *adjusted_mode = 687 &crtc_state->hw.adjusted_mode; 688 int psr_setup_time; 689 690 if (!CAN_PSR(dev_priv)) 691 return; 692 693 if (intel_dp != dev_priv->psr.dp) 694 return; 695 696 /* 697 * HSW spec explicitly says PSR is tied to port A. 
 * BDW+ platforms have an instance of PSR registers per transcoder, but
 * for now the driver only supports one instance of PSR, so let's keep it
 * hardcoded to PORT_A.
 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
		WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);

	WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* PSR1 and PSR2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/* Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 chicken = I915_READ(reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(reg, chicken);
	}

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other HW tracking issues, now that
	 * we can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);

	psr_irq_control(dev_priv);
}

static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;
	u32 val;

	WARN_ON(dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
	dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
	dev_priv->psr.transcoder = crtc_state->cpu_transcoder;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the
	 * first time the PSR HW tries to activate, so let's keep PSR disabled
	 * to avoid any rendering problems.
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
		val &= EDP_PSR_ERROR(0);
	} else {
		val = I915_READ(EDP_PSR_IIR);
		val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
	}
	if (val) {
		dev_priv->psr.sink_not_reliable = true;
		DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
		return;
	}

	DRM_DEBUG_KMS("Enabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
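 * (For fastsets, intel_psr_update() is used instead of an explicit
 * disable/enable pair; see its documentation below.)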
849 */ 850 void intel_psr_enable(struct intel_dp *intel_dp, 851 const struct intel_crtc_state *crtc_state) 852 { 853 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 854 855 if (!crtc_state->has_psr) 856 return; 857 858 if (WARN_ON(!CAN_PSR(dev_priv))) 859 return; 860 861 WARN_ON(dev_priv->drrs.dp); 862 863 mutex_lock(&dev_priv->psr.lock); 864 865 if (!psr_global_enabled(dev_priv->psr.debug)) { 866 DRM_DEBUG_KMS("PSR disabled by flag\n"); 867 goto unlock; 868 } 869 870 intel_psr_enable_locked(dev_priv, crtc_state); 871 872 unlock: 873 mutex_unlock(&dev_priv->psr.lock); 874 } 875 876 static void intel_psr_exit(struct drm_i915_private *dev_priv) 877 { 878 u32 val; 879 880 if (!dev_priv->psr.active) { 881 if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) { 882 val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); 883 WARN_ON(val & EDP_PSR2_ENABLE); 884 } 885 886 val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); 887 WARN_ON(val & EDP_PSR_ENABLE); 888 889 return; 890 } 891 892 if (dev_priv->psr.psr2_enabled) { 893 tgl_disallow_dc3co_on_psr2_exit(dev_priv); 894 val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); 895 WARN_ON(!(val & EDP_PSR2_ENABLE)); 896 val &= ~EDP_PSR2_ENABLE; 897 I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); 898 } else { 899 val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); 900 WARN_ON(!(val & EDP_PSR_ENABLE)); 901 val &= ~EDP_PSR_ENABLE; 902 I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val); 903 } 904 dev_priv->psr.active = false; 905 } 906 907 static void intel_psr_disable_locked(struct intel_dp *intel_dp) 908 { 909 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 910 i915_reg_t psr_status; 911 u32 psr_status_mask; 912 913 lockdep_assert_held(&dev_priv->psr.lock); 914 915 if (!dev_priv->psr.enabled) 916 return; 917 918 DRM_DEBUG_KMS("Disabling PSR%s\n", 919 dev_priv->psr.psr2_enabled ? "2" : "1"); 920 921 intel_psr_exit(dev_priv); 922 923 if (dev_priv->psr.psr2_enabled) { 924 psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder); 925 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; 926 } else { 927 psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder); 928 psr_status_mask = EDP_PSR_STATUS_STATE_MASK; 929 } 930 931 /* Wait till PSR is idle */ 932 if (intel_de_wait_for_clear(dev_priv, psr_status, 933 psr_status_mask, 2000)) 934 DRM_ERROR("Timed out waiting PSR idle state\n"); 935 936 /* Disable PSR on Sink */ 937 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); 938 939 if (dev_priv->psr.psr2_enabled) 940 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0); 941 942 dev_priv->psr.enabled = false; 943 } 944 945 /** 946 * intel_psr_disable - Disable PSR 947 * @intel_dp: Intel DP 948 * @old_crtc_state: old CRTC state 949 * 950 * This function needs to be called before disabling pipe. 
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
	cancel_delayed_work_sync(&dev_priv->psr.idle_work);
}

static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the currently
		 * active pipe.
		 */
		I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
	else
		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit
		 * PSR on older gens, so do the manual exit instead.
		 */
		intel_psr_exit(dev_priv);
}

/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update the PSR state, disabling, enabling or switching
 * PSR versions when executing fastsets. For full modesets, intel_psr_disable()
 * and intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
		return;

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(dev_priv);
		else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
			/*
			 * Activate PSR again after a force exit when enabling
			 * CRC in older gens
			 */
			if (!dev_priv->psr.active &&
			    !dev_priv->psr.busy_frontbuffer_bits)
				schedule_work(&dev_priv->psr.work);
		}

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
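 *
 * A rough sketch of the expected call site in pipe_update_start()
 * (illustrative only; local names at the call site may differ):
 *
 *	if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
 *		DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n",
 *			  psr_status);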
1053 */ 1054 int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, 1055 u32 *out_value) 1056 { 1057 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 1058 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1059 1060 if (!dev_priv->psr.enabled || !new_crtc_state->has_psr) 1061 return 0; 1062 1063 /* FIXME: Update this for PSR2 if we need to wait for idle */ 1064 if (READ_ONCE(dev_priv->psr.psr2_enabled)) 1065 return 0; 1066 1067 /* 1068 * From bspec: Panel Self Refresh (BDW+) 1069 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of 1070 * exit training time + 1.5 ms of aux channel handshake. 50 ms is 1071 * defensive enough to cover everything. 1072 */ 1073 1074 return __intel_wait_for_register(&dev_priv->uncore, 1075 EDP_PSR_STATUS(dev_priv->psr.transcoder), 1076 EDP_PSR_STATUS_STATE_MASK, 1077 EDP_PSR_STATUS_STATE_IDLE, 2, 50, 1078 out_value); 1079 } 1080 1081 static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) 1082 { 1083 i915_reg_t reg; 1084 u32 mask; 1085 int err; 1086 1087 if (!dev_priv->psr.enabled) 1088 return false; 1089 1090 if (dev_priv->psr.psr2_enabled) { 1091 reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder); 1092 mask = EDP_PSR2_STATUS_STATE_MASK; 1093 } else { 1094 reg = EDP_PSR_STATUS(dev_priv->psr.transcoder); 1095 mask = EDP_PSR_STATUS_STATE_MASK; 1096 } 1097 1098 mutex_unlock(&dev_priv->psr.lock); 1099 1100 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50); 1101 if (err) 1102 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); 1103 1104 /* After the unlocked wait, verify that PSR is still wanted! */ 1105 mutex_lock(&dev_priv->psr.lock); 1106 return err == 0 && dev_priv->psr.enabled; 1107 } 1108 1109 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) 1110 { 1111 struct drm_device *dev = &dev_priv->drm; 1112 struct drm_modeset_acquire_ctx ctx; 1113 struct drm_atomic_state *state; 1114 struct intel_crtc *crtc; 1115 int err; 1116 1117 state = drm_atomic_state_alloc(dev); 1118 if (!state) 1119 return -ENOMEM; 1120 1121 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 1122 state->acquire_ctx = &ctx; 1123 1124 retry: 1125 for_each_intel_crtc(dev, crtc) { 1126 struct intel_crtc_state *crtc_state = 1127 intel_atomic_get_crtc_state(state, crtc); 1128 1129 if (IS_ERR(crtc_state)) { 1130 err = PTR_ERR(crtc_state); 1131 goto error; 1132 } 1133 1134 if (crtc_state->hw.active && crtc_state->has_psr) { 1135 /* Mark mode as changed to trigger a pipe->update() */ 1136 crtc_state->uapi.mode_changed = true; 1137 break; 1138 } 1139 } 1140 1141 err = drm_atomic_commit(state); 1142 1143 error: 1144 if (err == -EDEADLK) { 1145 drm_atomic_state_clear(state); 1146 err = drm_modeset_backoff(&ctx); 1147 if (!err) 1148 goto retry; 1149 } 1150 1151 drm_modeset_drop_locks(&ctx); 1152 drm_modeset_acquire_fini(&ctx); 1153 drm_atomic_state_put(state); 1154 1155 return err; 1156 } 1157 1158 int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val) 1159 { 1160 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK; 1161 u32 old_mode; 1162 int ret; 1163 1164 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) || 1165 mode > I915_PSR_DEBUG_FORCE_PSR1) { 1166 DRM_DEBUG_KMS("Invalid debug mask %llx\n", val); 1167 return -EINVAL; 1168 } 1169 1170 ret = mutex_lock_interruptible(&dev_priv->psr.lock); 1171 if (ret) 1172 return ret; 1173 1174 old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK; 1175 dev_priv->psr.debug = val; 1176 1177 /* 1178 * Do it right away if 
it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (dev_priv->psr.enabled)
		psr_irq_control(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
{
	struct i915_psr *psr = &dev_priv->psr;

	intel_psr_disable_locked(psr->dp);
	psr->sink_not_reliable = true;
	/* let's make sure that sink is awake */
	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	if (READ_ONCE(dev_priv->psr.irq_aux_error))
		intel_psr_handle_irq(dev_priv);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/*
 * When we completely rely on PSR2 S/W tracking in the future, intel_psr_flush()
 * will invalidate and flush the PSR for ORIGIN_FLIP events as well, so
 * tgl_dc3co_flush() will need to be changed accordingly.
 */
static void
tgl_dc3co_flush(struct drm_i915_private *dev_priv,
		unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	u32 delay;

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.dc3co_enabled)
		goto unlock;

	if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
		goto unlock;

	/*
	 * At every frontbuffer flush flip event modify the delay of the
	 * delayed work; when the delayed work finally runs it means the
	 * display has been idle for the whole delay.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
		goto unlock;

	tgl_psr2_enable_dc3co(dev_priv);
	/* DC5/DC6 required idle frames = 6 */
	delay = 6 * dev_priv->psr.dc3co_exit_delay;
	mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
			 usecs_to_jiffies(delay));

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP) {
		tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		psr_force_hw_tracking_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	if (!dev_priv->psr.sink_support)
		return;

	if (IS_HASWELL(dev_priv))
		/*
		 * HSW doesn't have the PSR registers in the same space as the
		 * transcoders, so set this to a value that, when subtracted
		 * from a register in transcoder space, results in the right
		 * offset for HSW.
		 */
		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;

	if (i915_modparams.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			i915_modparams.enable_psr = 0;

	/* Set link_standby / link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement.
*/ 1384 dev_priv->psr.link_standby = false; 1385 else if (INTEL_GEN(dev_priv) < 12) 1386 /* For new platforms up to TGL let's respect VBT back again */ 1387 dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; 1388 1389 INIT_WORK(&dev_priv->psr.work, intel_psr_work); 1390 INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread); 1391 mutex_init(&dev_priv->psr.lock); 1392 } 1393 1394 static int psr_get_status_and_error_status(struct intel_dp *intel_dp, 1395 u8 *status, u8 *error_status) 1396 { 1397 struct drm_dp_aux *aux = &intel_dp->aux; 1398 int ret; 1399 1400 ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status); 1401 if (ret != 1) 1402 return ret; 1403 1404 ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status); 1405 if (ret != 1) 1406 return ret; 1407 1408 *status = *status & DP_PSR_SINK_STATE_MASK; 1409 1410 return 0; 1411 } 1412 1413 static void psr_alpm_check(struct intel_dp *intel_dp) 1414 { 1415 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1416 struct drm_dp_aux *aux = &intel_dp->aux; 1417 struct i915_psr *psr = &dev_priv->psr; 1418 u8 val; 1419 int r; 1420 1421 if (!psr->psr2_enabled) 1422 return; 1423 1424 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val); 1425 if (r != 1) { 1426 DRM_ERROR("Error reading ALPM status\n"); 1427 return; 1428 } 1429 1430 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) { 1431 intel_psr_disable_locked(intel_dp); 1432 psr->sink_not_reliable = true; 1433 DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n"); 1434 1435 /* Clearing error */ 1436 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val); 1437 } 1438 } 1439 1440 static void psr_capability_changed_check(struct intel_dp *intel_dp) 1441 { 1442 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1443 struct i915_psr *psr = &dev_priv->psr; 1444 u8 val; 1445 int r; 1446 1447 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val); 1448 if (r != 1) { 1449 DRM_ERROR("Error reading DP_PSR_ESI\n"); 1450 return; 1451 } 1452 1453 if (val & DP_PSR_CAPS_CHANGE) { 1454 intel_psr_disable_locked(intel_dp); 1455 psr->sink_not_reliable = true; 1456 DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n"); 1457 1458 /* Clearing it */ 1459 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val); 1460 } 1461 } 1462 1463 void intel_psr_short_pulse(struct intel_dp *intel_dp) 1464 { 1465 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1466 struct i915_psr *psr = &dev_priv->psr; 1467 u8 status, error_status; 1468 const u8 errors = DP_PSR_RFB_STORAGE_ERROR | 1469 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | 1470 DP_PSR_LINK_CRC_ERROR; 1471 1472 if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) 1473 return; 1474 1475 mutex_lock(&psr->lock); 1476 1477 if (!psr->enabled || psr->dp != intel_dp) 1478 goto exit; 1479 1480 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) { 1481 DRM_ERROR("Error reading PSR status or error status\n"); 1482 goto exit; 1483 } 1484 1485 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) { 1486 intel_psr_disable_locked(intel_dp); 1487 psr->sink_not_reliable = true; 1488 } 1489 1490 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status) 1491 DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n"); 1492 if (error_status & DP_PSR_RFB_STORAGE_ERROR) 1493 DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n"); 1494 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) 1495 DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n"); 1496 if (error_status & DP_PSR_LINK_CRC_ERROR) 1497 DRM_DEBUG_KMS("PSR 
Link CRC error, disabling PSR\n"); 1498 1499 if (error_status & ~errors) 1500 DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n", 1501 error_status & ~errors); 1502 /* clear status register */ 1503 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status); 1504 1505 psr_alpm_check(intel_dp); 1506 psr_capability_changed_check(intel_dp); 1507 1508 exit: 1509 mutex_unlock(&psr->lock); 1510 } 1511 1512 bool intel_psr_enabled(struct intel_dp *intel_dp) 1513 { 1514 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1515 bool ret; 1516 1517 if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) 1518 return false; 1519 1520 mutex_lock(&dev_priv->psr.lock); 1521 ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled); 1522 mutex_unlock(&dev_priv->psr.lock); 1523 1524 return ret; 1525 } 1526 1527 void intel_psr_atomic_check(struct drm_connector *connector, 1528 struct drm_connector_state *old_state, 1529 struct drm_connector_state *new_state) 1530 { 1531 struct drm_i915_private *dev_priv = to_i915(connector->dev); 1532 struct intel_connector *intel_connector; 1533 struct intel_digital_port *dig_port; 1534 struct drm_crtc_state *crtc_state; 1535 1536 if (!CAN_PSR(dev_priv) || !new_state->crtc || 1537 dev_priv->psr.initially_probed) 1538 return; 1539 1540 intel_connector = to_intel_connector(connector); 1541 dig_port = enc_to_dig_port(intel_connector->encoder); 1542 if (dev_priv->psr.dp != &dig_port->dp) 1543 return; 1544 1545 crtc_state = drm_atomic_get_new_crtc_state(new_state->state, 1546 new_state->crtc); 1547 crtc_state->mode_changed = true; 1548 dev_priv->psr.initially_probed = true; 1549 } 1550