1 /* 2 * Copyright © 2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 */ 23 24 #include <drm/drm_atomic_helper.h> 25 26 #include "display/intel_dp.h" 27 28 #include "i915_drv.h" 29 #include "intel_atomic.h" 30 #include "intel_display_types.h" 31 #include "intel_psr.h" 32 #include "intel_sprite.h" 33 #include "intel_hdmi.h" 34 35 /** 36 * DOC: Panel Self Refresh (PSR/SRD) 37 * 38 * Since Haswell Display controller supports Panel Self-Refresh on display 39 * panels witch have a remote frame buffer (RFB) implemented according to PSR 40 * spec in eDP1.3. PSR feature allows the display to go to lower standby states 41 * when system is idle but display is on as it eliminates display refresh 42 * request to DDR memory completely as long as the frame buffer for that 43 * display is unchanged. 44 * 45 * Panel Self Refresh must be supported by both Hardware (source) and 46 * Panel (sink). 47 * 48 * PSR saves power by caching the framebuffer in the panel RFB, which allows us 49 * to power down the link and memory controller. For DSI panels the same idea 50 * is called "manual mode". 51 * 52 * The implementation uses the hardware-based PSR support which automatically 53 * enters/exits self-refresh mode. The hardware takes care of sending the 54 * required DP aux message and could even retrain the link (that part isn't 55 * enabled yet though). The hardware also keeps track of any frontbuffer 56 * changes to know when to exit self-refresh mode again. Unfortunately that 57 * part doesn't work too well, hence why the i915 PSR support uses the 58 * software frontbuffer tracking to make sure it doesn't miss a screen 59 * update. For this integration intel_psr_invalidate() and intel_psr_flush() 60 * get called by the frontbuffer tracking code. Note that because of locking 61 * issues the self-refresh re-enable code is done from a work queue, which 62 * must be correctly synchronized/cancelled when shutting down the pipe." 63 * 64 * DC3CO (DC3 clock off) 65 * 66 * On top of PSR2, GEN12 adds a intermediate power savings state that turns 67 * clock off automatically during PSR2 idle state. 68 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep 69 * entry/exit allows the HW to enter a low-power state even when page flipping 70 * periodically (for instance a 30fps video playback scenario). 71 * 72 * Every time a flips occurs PSR2 will get out of deep sleep state(if it was), 73 * so DC3CO is enabled and tgl_dc3co_disable_work is schedule to run after 6 74 * frames, if no other flip occurs and the function above is executed, DC3CO is 75 * disabled and PSR2 is configured to enter deep sleep, resetting again in case 76 * of another flip. 77 * Front buffer modifications do not trigger DC3CO activation on purpose as it 78 * would bring a lot of complexity and most of the moderns systems will only 79 * use page flips. 80 */ 81 82 static bool psr_global_enabled(struct drm_i915_private *i915) 83 { 84 switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) { 85 case I915_PSR_DEBUG_DEFAULT: 86 return i915->params.enable_psr; 87 case I915_PSR_DEBUG_DISABLE: 88 return false; 89 default: 90 return true; 91 } 92 } 93 94 static bool psr2_global_enabled(struct drm_i915_private *dev_priv) 95 { 96 switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { 97 case I915_PSR_DEBUG_DISABLE: 98 case I915_PSR_DEBUG_FORCE_PSR1: 99 return false; 100 default: 101 return true; 102 } 103 } 104 105 static void psr_irq_control(struct drm_i915_private *dev_priv) 106 { 107 enum transcoder trans_shift; 108 u32 mask, val; 109 i915_reg_t imr_reg; 110 111 /* 112 * gen12+ has registers relative to transcoder and one per transcoder 113 * using the same bit definition: handle it as TRANSCODER_EDP to force 114 * 0 shift in bit definition 115 */ 116 if (INTEL_GEN(dev_priv) >= 12) { 117 trans_shift = 0; 118 imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder); 119 } else { 120 trans_shift = dev_priv->psr.transcoder; 121 imr_reg = EDP_PSR_IMR; 122 } 123 124 mask = EDP_PSR_ERROR(trans_shift); 125 if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ) 126 mask |= EDP_PSR_POST_EXIT(trans_shift) | 127 EDP_PSR_PRE_ENTRY(trans_shift); 128 129 /* Warning: it is masking/setting reserved bits too */ 130 val = intel_de_read(dev_priv, imr_reg); 131 val &= ~EDP_PSR_TRANS_MASK(trans_shift); 132 val |= ~mask; 133 intel_de_write(dev_priv, imr_reg, val); 134 } 135 136 static void psr_event_print(struct drm_i915_private *i915, 137 u32 val, bool psr2_enabled) 138 { 139 drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val); 140 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE) 141 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n"); 142 if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled) 143 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n"); 144 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN) 145 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n"); 146 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN) 147 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n"); 148 if (val & PSR_EVENT_GRAPHICS_RESET) 149 drm_dbg_kms(&i915->drm, "\tGraphics reset\n"); 150 if (val & PSR_EVENT_PCH_INTERRUPT) 151 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n"); 152 if (val & PSR_EVENT_MEMORY_UP) 153 drm_dbg_kms(&i915->drm, "\tMemory up\n"); 154 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY) 155 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n"); 156 if (val & PSR_EVENT_WD_TIMER_EXPIRE) 157 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n"); 158 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE) 159 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n"); 160 if (val & PSR_EVENT_REGISTER_UPDATE) 161 drm_dbg_kms(&i915->drm, "\tRegister updated\n"); 162 if (val & PSR_EVENT_HDCP_ENABLE) 163 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n"); 164 if (val & PSR_EVENT_KVMR_SESSION_ENABLE) 165 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n"); 166 if (val & PSR_EVENT_VBI_ENABLE) 167 drm_dbg_kms(&i915->drm, "\tVBI enabled\n"); 168 if (val & PSR_EVENT_LPSP_MODE_EXIT) 169 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n"); 170 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled) 171 drm_dbg_kms(&i915->drm, "\tPSR disabled\n"); 172 } 173 174 void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) 175 { 176 enum transcoder cpu_transcoder = dev_priv->psr.transcoder; 177 enum transcoder trans_shift; 178 i915_reg_t imr_reg; 179 ktime_t time_ns = ktime_get(); 180 181 if (INTEL_GEN(dev_priv) >= 12) { 182 trans_shift = 0; 183 imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder); 184 } else { 185 trans_shift = dev_priv->psr.transcoder; 186 imr_reg = EDP_PSR_IMR; 187 } 188 189 if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) { 190 dev_priv->psr.last_entry_attempt = time_ns; 191 drm_dbg_kms(&dev_priv->drm, 192 "[transcoder %s] PSR entry attempt in 2 vblanks\n", 193 transcoder_name(cpu_transcoder)); 194 } 195 196 if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) { 197 dev_priv->psr.last_exit = time_ns; 198 drm_dbg_kms(&dev_priv->drm, 199 "[transcoder %s] PSR exit completed\n", 200 transcoder_name(cpu_transcoder)); 201 202 if (INTEL_GEN(dev_priv) >= 9) { 203 u32 val = intel_de_read(dev_priv, 204 PSR_EVENT(cpu_transcoder)); 205 bool psr2_enabled = dev_priv->psr.psr2_enabled; 206 207 intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder), 208 val); 209 psr_event_print(dev_priv, val, psr2_enabled); 210 } 211 } 212 213 if (psr_iir & EDP_PSR_ERROR(trans_shift)) { 214 u32 val; 215 216 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n", 217 transcoder_name(cpu_transcoder)); 218 219 dev_priv->psr.irq_aux_error = true; 220 221 /* 222 * If this interruption is not masked it will keep 223 * interrupting so fast that it prevents the scheduled 224 * work to run. 225 * Also after a PSR error, we don't want to arm PSR 226 * again so we don't care about unmask the interruption 227 * or unset irq_aux_error. 228 */ 229 val = intel_de_read(dev_priv, imr_reg); 230 val |= EDP_PSR_ERROR(trans_shift); 231 intel_de_write(dev_priv, imr_reg, val); 232 233 schedule_work(&dev_priv->psr.work); 234 } 235 } 236 237 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp) 238 { 239 u8 alpm_caps = 0; 240 241 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, 242 &alpm_caps) != 1) 243 return false; 244 return alpm_caps & DP_ALPM_CAP; 245 } 246 247 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp) 248 { 249 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 250 u8 val = 8; /* assume the worst if we can't read the value */ 251 252 if (drm_dp_dpcd_readb(&intel_dp->aux, 253 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1) 254 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK; 255 else 256 drm_dbg_kms(&i915->drm, 257 "Unable to get sink synchronization latency, assuming 8 frames\n"); 258 return val; 259 } 260 261 static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp) 262 { 263 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 264 u16 val; 265 ssize_t r; 266 267 /* 268 * Returning the default X granularity if granularity not required or 269 * if DPCD read fails 270 */ 271 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) 272 return 4; 273 274 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2); 275 if (r != 2) 276 drm_dbg_kms(&i915->drm, 277 "Unable to read DP_PSR2_SU_X_GRANULARITY\n"); 278 279 /* 280 * Spec says that if the value read is 0 the default granularity should 281 * be used instead. 282 */ 283 if (r != 2 || val == 0) 284 val = 4; 285 286 return val; 287 } 288 289 void intel_psr_init_dpcd(struct intel_dp *intel_dp) 290 { 291 struct drm_i915_private *dev_priv = 292 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 293 294 if (dev_priv->psr.dp) { 295 drm_warn(&dev_priv->drm, 296 "More than one eDP panel found, PSR support should be extended\n"); 297 return; 298 } 299 300 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, 301 sizeof(intel_dp->psr_dpcd)); 302 303 if (!intel_dp->psr_dpcd[0]) 304 return; 305 drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n", 306 intel_dp->psr_dpcd[0]); 307 308 if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) { 309 drm_dbg_kms(&dev_priv->drm, 310 "PSR support not currently available for this panel\n"); 311 return; 312 } 313 314 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { 315 drm_dbg_kms(&dev_priv->drm, 316 "Panel lacks power state control, PSR cannot be enabled\n"); 317 return; 318 } 319 320 dev_priv->psr.sink_support = true; 321 dev_priv->psr.sink_sync_latency = 322 intel_dp_get_sink_sync_latency(intel_dp); 323 324 dev_priv->psr.dp = intel_dp; 325 326 if (INTEL_GEN(dev_priv) >= 9 && 327 (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) { 328 bool y_req = intel_dp->psr_dpcd[1] & 329 DP_PSR2_SU_Y_COORDINATE_REQUIRED; 330 bool alpm = intel_dp_get_alpm_status(intel_dp); 331 332 /* 333 * All panels that supports PSR version 03h (PSR2 + 334 * Y-coordinate) can handle Y-coordinates in VSC but we are 335 * only sure that it is going to be used when required by the 336 * panel. This way panel is capable to do selective update 337 * without a aux frame sync. 338 * 339 * To support PSR version 02h and PSR version 03h without 340 * Y-coordinate requirement panels we would need to enable 341 * GTC first. 342 */ 343 dev_priv->psr.sink_psr2_support = y_req && alpm; 344 drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n", 345 dev_priv->psr.sink_psr2_support ? "" : "not "); 346 347 if (dev_priv->psr.sink_psr2_support) { 348 dev_priv->psr.colorimetry_support = 349 intel_dp_get_colorimetry_status(intel_dp); 350 dev_priv->psr.su_x_granularity = 351 intel_dp_get_su_x_granulartiy(intel_dp); 352 } 353 } 354 } 355 356 static void hsw_psr_setup_aux(struct intel_dp *intel_dp) 357 { 358 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 359 u32 aux_clock_divider, aux_ctl; 360 int i; 361 static const u8 aux_msg[] = { 362 [0] = DP_AUX_NATIVE_WRITE << 4, 363 [1] = DP_SET_POWER >> 8, 364 [2] = DP_SET_POWER & 0xff, 365 [3] = 1 - 1, 366 [4] = DP_SET_POWER_D0, 367 }; 368 u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK | 369 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK | 370 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK | 371 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK; 372 373 BUILD_BUG_ON(sizeof(aux_msg) > 20); 374 for (i = 0; i < sizeof(aux_msg); i += 4) 375 intel_de_write(dev_priv, 376 EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2), 377 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); 378 379 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); 380 381 /* Start with bits set for DDI_AUX_CTL register */ 382 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), 383 aux_clock_divider); 384 385 /* Select only valid bits for SRD_AUX_CTL */ 386 aux_ctl &= psr_aux_mask; 387 intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), 388 aux_ctl); 389 } 390 391 static void intel_psr_enable_sink(struct intel_dp *intel_dp) 392 { 393 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 394 u8 dpcd_val = DP_PSR_ENABLE; 395 396 /* Enable ALPM at sink for psr2 */ 397 if (dev_priv->psr.psr2_enabled) { 398 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 399 DP_ALPM_ENABLE | 400 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); 401 402 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; 403 } else { 404 if (dev_priv->psr.link_standby) 405 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; 406 407 if (INTEL_GEN(dev_priv) >= 8) 408 dpcd_val |= DP_PSR_CRC_VERIFICATION; 409 } 410 411 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); 412 413 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); 414 } 415 416 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) 417 { 418 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 419 u32 val = 0; 420 421 if (INTEL_GEN(dev_priv) >= 11) 422 val |= EDP_PSR_TP4_TIME_0US; 423 424 if (dev_priv->params.psr_safest_params) { 425 val |= EDP_PSR_TP1_TIME_2500us; 426 val |= EDP_PSR_TP2_TP3_TIME_2500us; 427 goto check_tp3_sel; 428 } 429 430 if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0) 431 val |= EDP_PSR_TP1_TIME_0us; 432 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100) 433 val |= EDP_PSR_TP1_TIME_100us; 434 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500) 435 val |= EDP_PSR_TP1_TIME_500us; 436 else 437 val |= EDP_PSR_TP1_TIME_2500us; 438 439 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0) 440 val |= EDP_PSR_TP2_TP3_TIME_0us; 441 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100) 442 val |= EDP_PSR_TP2_TP3_TIME_100us; 443 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500) 444 val |= EDP_PSR_TP2_TP3_TIME_500us; 445 else 446 val |= EDP_PSR_TP2_TP3_TIME_2500us; 447 448 check_tp3_sel: 449 if (intel_dp_source_supports_hbr2(intel_dp) && 450 drm_dp_tps3_supported(intel_dp->dpcd)) 451 val |= EDP_PSR_TP1_TP3_SEL; 452 else 453 val |= EDP_PSR_TP1_TP2_SEL; 454 455 return val; 456 } 457 458 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) 459 { 460 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 461 int idle_frames; 462 463 /* Let's use 6 as the minimum to cover all known cases including the 464 * off-by-one issue that HW has in some cases. 465 */ 466 idle_frames = max(6, dev_priv->vbt.psr.idle_frames); 467 idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); 468 469 if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf)) 470 idle_frames = 0xf; 471 472 return idle_frames; 473 } 474 475 static void hsw_activate_psr1(struct intel_dp *intel_dp) 476 { 477 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 478 u32 max_sleep_time = 0x1f; 479 u32 val = EDP_PSR_ENABLE; 480 481 val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT; 482 483 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; 484 if (IS_HASWELL(dev_priv)) 485 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; 486 487 if (dev_priv->psr.link_standby) 488 val |= EDP_PSR_LINK_STANDBY; 489 490 val |= intel_psr1_get_tp_time(intel_dp); 491 492 if (INTEL_GEN(dev_priv) >= 8) 493 val |= EDP_PSR_CRC_ENABLE; 494 495 val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & 496 EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK); 497 intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val); 498 } 499 500 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) 501 { 502 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 503 u32 val = 0; 504 505 if (dev_priv->params.psr_safest_params) 506 return EDP_PSR2_TP2_TIME_2500us; 507 508 if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && 509 dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50) 510 val |= EDP_PSR2_TP2_TIME_50us; 511 else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100) 512 val |= EDP_PSR2_TP2_TIME_100us; 513 else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500) 514 val |= EDP_PSR2_TP2_TIME_500us; 515 else 516 val |= EDP_PSR2_TP2_TIME_2500us; 517 518 return val; 519 } 520 521 static void hsw_activate_psr2(struct intel_dp *intel_dp) 522 { 523 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 524 u32 val; 525 526 val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT; 527 528 val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE; 529 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 530 val |= EDP_Y_COORDINATE_ENABLE; 531 532 val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1); 533 val |= intel_psr2_get_tp_time(intel_dp); 534 535 if (INTEL_GEN(dev_priv) >= 12) { 536 /* 537 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default 538 * values from BSpec. In order to setting an optimal power 539 * consumption, lower than 4k resoluition mode needs to decrese 540 * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution 541 * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE. 542 */ 543 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; 544 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7); 545 val |= TGL_EDP_PSR2_FAST_WAKE(7); 546 } else if (INTEL_GEN(dev_priv) >= 9) { 547 val |= EDP_PSR2_IO_BUFFER_WAKE(7); 548 val |= EDP_PSR2_FAST_WAKE(7); 549 } 550 551 if (dev_priv->psr.psr2_sel_fetch_enabled) { 552 /* WA 1408330847 */ 553 if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || 554 IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)) 555 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, 556 DIS_RAM_BYPASS_PSR2_MAN_TRACK, 557 DIS_RAM_BYPASS_PSR2_MAN_TRACK); 558 559 intel_de_write(dev_priv, 560 PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 561 PSR2_MAN_TRK_CTL_ENABLE); 562 } else if (HAS_PSR2_SEL_FETCH(dev_priv)) { 563 intel_de_write(dev_priv, 564 PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0); 565 } 566 567 /* 568 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is 569 * recommending keep this bit unset while PSR2 is enabled. 570 */ 571 intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0); 572 573 intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val); 574 } 575 576 static bool 577 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) 578 { 579 if (INTEL_GEN(dev_priv) < 9) 580 return false; 581 else if (INTEL_GEN(dev_priv) >= 12) 582 return trans == TRANSCODER_A; 583 else 584 return trans == TRANSCODER_EDP; 585 } 586 587 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate) 588 { 589 if (!cstate || !cstate->hw.active) 590 return 0; 591 592 return DIV_ROUND_UP(1000 * 1000, 593 drm_mode_vrefresh(&cstate->hw.adjusted_mode)); 594 } 595 596 static void psr2_program_idle_frames(struct drm_i915_private *dev_priv, 597 u32 idle_frames) 598 { 599 u32 val; 600 601 idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT; 602 val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)); 603 val &= ~EDP_PSR2_IDLE_FRAME_MASK; 604 val |= idle_frames; 605 intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val); 606 } 607 608 static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv) 609 { 610 psr2_program_idle_frames(dev_priv, 0); 611 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO); 612 } 613 614 static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv) 615 { 616 struct intel_dp *intel_dp = dev_priv->psr.dp; 617 618 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 619 psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp)); 620 } 621 622 static void tgl_dc3co_disable_work(struct work_struct *work) 623 { 624 struct drm_i915_private *dev_priv = 625 container_of(work, typeof(*dev_priv), psr.dc3co_work.work); 626 627 mutex_lock(&dev_priv->psr.lock); 628 /* If delayed work is pending, it is not idle */ 629 if (delayed_work_pending(&dev_priv->psr.dc3co_work)) 630 goto unlock; 631 632 tgl_psr2_disable_dc3co(dev_priv); 633 unlock: 634 mutex_unlock(&dev_priv->psr.lock); 635 } 636 637 static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv) 638 { 639 if (!dev_priv->psr.dc3co_enabled) 640 return; 641 642 cancel_delayed_work(&dev_priv->psr.dc3co_work); 643 /* Before PSR2 exit disallow dc3co*/ 644 tgl_psr2_disable_dc3co(dev_priv); 645 } 646 647 static void 648 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, 649 struct intel_crtc_state *crtc_state) 650 { 651 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay; 652 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 653 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 654 u32 exit_scanlines; 655 656 if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO)) 657 return; 658 659 /* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/ 660 if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A || 661 dig_port->base.port != PORT_A) 662 return; 663 664 /* 665 * DC3CO Exit time 200us B.Spec 49196 666 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1 667 */ 668 exit_scanlines = 669 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1; 670 671 if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay)) 672 return; 673 674 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines; 675 } 676 677 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, 678 struct intel_crtc_state *crtc_state) 679 { 680 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); 681 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 682 struct intel_plane_state *plane_state; 683 struct intel_plane *plane; 684 int i; 685 686 if (!dev_priv->params.enable_psr2_sel_fetch) { 687 drm_dbg_kms(&dev_priv->drm, 688 "PSR2 sel fetch not enabled, disabled by parameter\n"); 689 return false; 690 } 691 692 if (crtc_state->uapi.async_flip) { 693 drm_dbg_kms(&dev_priv->drm, 694 "PSR2 sel fetch not enabled, async flip enabled\n"); 695 return false; 696 } 697 698 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 699 if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) { 700 drm_dbg_kms(&dev_priv->drm, 701 "PSR2 sel fetch not enabled, plane rotated\n"); 702 return false; 703 } 704 } 705 706 return crtc_state->enable_psr2_sel_fetch = true; 707 } 708 709 static bool intel_psr2_config_valid(struct intel_dp *intel_dp, 710 struct intel_crtc_state *crtc_state) 711 { 712 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 713 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; 714 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; 715 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0; 716 717 if (!dev_priv->psr.sink_psr2_support) 718 return false; 719 720 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) { 721 drm_dbg_kms(&dev_priv->drm, 722 "PSR2 not supported in transcoder %s\n", 723 transcoder_name(crtc_state->cpu_transcoder)); 724 return false; 725 } 726 727 if (!psr2_global_enabled(dev_priv)) { 728 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n"); 729 return false; 730 } 731 732 /* 733 * DSC and PSR2 cannot be enabled simultaneously. If a requested 734 * resolution requires DSC to be enabled, priority is given to DSC 735 * over PSR2. 736 */ 737 if (crtc_state->dsc.compression_enable) { 738 drm_dbg_kms(&dev_priv->drm, 739 "PSR2 cannot be enabled since DSC is enabled\n"); 740 return false; 741 } 742 743 if (crtc_state->crc_enabled) { 744 drm_dbg_kms(&dev_priv->drm, 745 "PSR2 not enabled because it would inhibit pipe CRC calculation\n"); 746 return false; 747 } 748 749 if (INTEL_GEN(dev_priv) >= 12) { 750 psr_max_h = 5120; 751 psr_max_v = 3200; 752 max_bpp = 30; 753 } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { 754 psr_max_h = 4096; 755 psr_max_v = 2304; 756 max_bpp = 24; 757 } else if (IS_GEN(dev_priv, 9)) { 758 psr_max_h = 3640; 759 psr_max_v = 2304; 760 max_bpp = 24; 761 } 762 763 if (crtc_state->pipe_bpp > max_bpp) { 764 drm_dbg_kms(&dev_priv->drm, 765 "PSR2 not enabled, pipe bpp %d > max supported %d\n", 766 crtc_state->pipe_bpp, max_bpp); 767 return false; 768 } 769 770 /* 771 * HW sends SU blocks of size four scan lines, which means the starting 772 * X coordinate and Y granularity requirements will always be met. We 773 * only need to validate the SU block width is a multiple of 774 * x granularity. 775 */ 776 if (crtc_hdisplay % dev_priv->psr.su_x_granularity) { 777 drm_dbg_kms(&dev_priv->drm, 778 "PSR2 not enabled, hdisplay(%d) not multiple of %d\n", 779 crtc_hdisplay, dev_priv->psr.su_x_granularity); 780 return false; 781 } 782 783 if (HAS_PSR2_SEL_FETCH(dev_priv)) { 784 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && 785 !HAS_PSR_HW_TRACKING(dev_priv)) { 786 drm_dbg_kms(&dev_priv->drm, 787 "PSR2 not enabled, selective fetch not valid and no HW tracking available\n"); 788 return false; 789 } 790 } 791 792 if (!crtc_state->enable_psr2_sel_fetch && 793 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) { 794 drm_dbg_kms(&dev_priv->drm, 795 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", 796 crtc_hdisplay, crtc_vdisplay, 797 psr_max_h, psr_max_v); 798 return false; 799 } 800 801 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state); 802 return true; 803 } 804 805 void intel_psr_compute_config(struct intel_dp *intel_dp, 806 struct intel_crtc_state *crtc_state) 807 { 808 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 809 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 810 const struct drm_display_mode *adjusted_mode = 811 &crtc_state->hw.adjusted_mode; 812 int psr_setup_time; 813 814 if (!CAN_PSR(dev_priv)) 815 return; 816 817 if (intel_dp != dev_priv->psr.dp) 818 return; 819 820 if (!psr_global_enabled(dev_priv)) { 821 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n"); 822 return; 823 } 824 825 /* 826 * HSW spec explicitly says PSR is tied to port A. 827 * BDW+ platforms have a instance of PSR registers per transcoder but 828 * for now it only supports one instance of PSR, so lets keep it 829 * hardcoded to PORT_A 830 */ 831 if (dig_port->base.port != PORT_A) { 832 drm_dbg_kms(&dev_priv->drm, 833 "PSR condition failed: Port not supported\n"); 834 return; 835 } 836 837 if (dev_priv->psr.sink_not_reliable) { 838 drm_dbg_kms(&dev_priv->drm, 839 "PSR sink implementation is not reliable\n"); 840 return; 841 } 842 843 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 844 drm_dbg_kms(&dev_priv->drm, 845 "PSR condition failed: Interlaced mode enabled\n"); 846 return; 847 } 848 849 psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); 850 if (psr_setup_time < 0) { 851 drm_dbg_kms(&dev_priv->drm, 852 "PSR condition failed: Invalid PSR setup time (0x%02x)\n", 853 intel_dp->psr_dpcd[1]); 854 return; 855 } 856 857 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > 858 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { 859 drm_dbg_kms(&dev_priv->drm, 860 "PSR condition failed: PSR setup time (%d us) too long\n", 861 psr_setup_time); 862 return; 863 } 864 865 crtc_state->has_psr = true; 866 crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); 867 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 868 } 869 870 static void intel_psr_activate(struct intel_dp *intel_dp) 871 { 872 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 873 874 if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) 875 drm_WARN_ON(&dev_priv->drm, 876 intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE); 877 878 drm_WARN_ON(&dev_priv->drm, 879 intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE); 880 drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active); 881 lockdep_assert_held(&dev_priv->psr.lock); 882 883 /* psr1 and psr2 are mutually exclusive.*/ 884 if (dev_priv->psr.psr2_enabled) 885 hsw_activate_psr2(intel_dp); 886 else 887 hsw_activate_psr1(intel_dp); 888 889 dev_priv->psr.active = true; 890 } 891 892 static void intel_psr_enable_source(struct intel_dp *intel_dp, 893 const struct intel_crtc_state *crtc_state) 894 { 895 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 896 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 897 u32 mask; 898 899 /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+ 900 * use hardcoded values PSR AUX transactions 901 */ 902 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 903 hsw_psr_setup_aux(intel_dp); 904 905 if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) && 906 !IS_GEMINILAKE(dev_priv))) { 907 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); 908 u32 chicken = intel_de_read(dev_priv, reg); 909 910 chicken |= PSR2_VSC_ENABLE_PROG_HEADER | 911 PSR2_ADD_VERTICAL_LINE_COUNT; 912 intel_de_write(dev_priv, reg, chicken); 913 } 914 915 /* 916 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also 917 * mask LPSP to avoid dependency on other drivers that might block 918 * runtime_pm besides preventing other hw tracking issues now we 919 * can rely on frontbuffer tracking. 920 */ 921 mask = EDP_PSR_DEBUG_MASK_MEMUP | 922 EDP_PSR_DEBUG_MASK_HPD | 923 EDP_PSR_DEBUG_MASK_LPSP | 924 EDP_PSR_DEBUG_MASK_MAX_SLEEP; 925 926 if (INTEL_GEN(dev_priv) < 11) 927 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; 928 929 intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder), 930 mask); 931 932 psr_irq_control(dev_priv); 933 934 if (crtc_state->dc3co_exitline) { 935 u32 val; 936 937 /* 938 * TODO: if future platforms supports DC3CO in more than one 939 * transcoder, EXITLINE will need to be unset when disabling PSR 940 */ 941 val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder)); 942 val &= ~EXITLINE_MASK; 943 val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT; 944 val |= EXITLINE_ENABLE; 945 intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val); 946 } 947 948 if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv)) 949 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, 950 dev_priv->psr.psr2_sel_fetch_enabled ? 951 IGNORE_PSR2_HW_TRACKING : 0); 952 } 953 954 static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, 955 const struct intel_crtc_state *crtc_state, 956 const struct drm_connector_state *conn_state) 957 { 958 struct intel_dp *intel_dp = dev_priv->psr.dp; 959 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 960 struct intel_encoder *encoder = &dig_port->base; 961 u32 val; 962 963 drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled); 964 965 dev_priv->psr.psr2_enabled = crtc_state->has_psr2; 966 dev_priv->psr.busy_frontbuffer_bits = 0; 967 dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; 968 dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline; 969 dev_priv->psr.transcoder = crtc_state->cpu_transcoder; 970 /* DC5/DC6 requires at least 6 idle frames */ 971 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6); 972 dev_priv->psr.dc3co_exit_delay = val; 973 dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch; 974 975 /* 976 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR 977 * will still keep the error set even after the reset done in the 978 * irq_preinstall and irq_uninstall hooks. 979 * And enabling in this situation cause the screen to freeze in the 980 * first time that PSR HW tries to activate so lets keep PSR disabled 981 * to avoid any rendering problems. 982 */ 983 if (INTEL_GEN(dev_priv) >= 12) { 984 val = intel_de_read(dev_priv, 985 TRANS_PSR_IIR(dev_priv->psr.transcoder)); 986 val &= EDP_PSR_ERROR(0); 987 } else { 988 val = intel_de_read(dev_priv, EDP_PSR_IIR); 989 val &= EDP_PSR_ERROR(dev_priv->psr.transcoder); 990 } 991 if (val) { 992 dev_priv->psr.sink_not_reliable = true; 993 drm_dbg_kms(&dev_priv->drm, 994 "PSR interruption error set, not enabling PSR\n"); 995 return; 996 } 997 998 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", 999 dev_priv->psr.psr2_enabled ? "2" : "1"); 1000 intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state, 1001 &dev_priv->psr.vsc); 1002 intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc); 1003 intel_psr_enable_sink(intel_dp); 1004 intel_psr_enable_source(intel_dp, crtc_state); 1005 dev_priv->psr.enabled = true; 1006 1007 intel_psr_activate(intel_dp); 1008 } 1009 1010 /** 1011 * intel_psr_enable - Enable PSR 1012 * @intel_dp: Intel DP 1013 * @crtc_state: new CRTC state 1014 * @conn_state: new CONNECTOR state 1015 * 1016 * This function can only be called after the pipe is fully trained and enabled. 1017 */ 1018 void intel_psr_enable(struct intel_dp *intel_dp, 1019 const struct intel_crtc_state *crtc_state, 1020 const struct drm_connector_state *conn_state) 1021 { 1022 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1023 1024 if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp) 1025 return; 1026 1027 dev_priv->psr.force_mode_changed = false; 1028 1029 if (!crtc_state->has_psr) 1030 return; 1031 1032 drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp); 1033 1034 mutex_lock(&dev_priv->psr.lock); 1035 intel_psr_enable_locked(dev_priv, crtc_state, conn_state); 1036 mutex_unlock(&dev_priv->psr.lock); 1037 } 1038 1039 static void intel_psr_exit(struct drm_i915_private *dev_priv) 1040 { 1041 u32 val; 1042 1043 if (!dev_priv->psr.active) { 1044 if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) { 1045 val = intel_de_read(dev_priv, 1046 EDP_PSR2_CTL(dev_priv->psr.transcoder)); 1047 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE); 1048 } 1049 1050 val = intel_de_read(dev_priv, 1051 EDP_PSR_CTL(dev_priv->psr.transcoder)); 1052 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE); 1053 1054 return; 1055 } 1056 1057 if (dev_priv->psr.psr2_enabled) { 1058 tgl_disallow_dc3co_on_psr2_exit(dev_priv); 1059 val = intel_de_read(dev_priv, 1060 EDP_PSR2_CTL(dev_priv->psr.transcoder)); 1061 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE)); 1062 val &= ~EDP_PSR2_ENABLE; 1063 intel_de_write(dev_priv, 1064 EDP_PSR2_CTL(dev_priv->psr.transcoder), val); 1065 } else { 1066 val = intel_de_read(dev_priv, 1067 EDP_PSR_CTL(dev_priv->psr.transcoder)); 1068 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE)); 1069 val &= ~EDP_PSR_ENABLE; 1070 intel_de_write(dev_priv, 1071 EDP_PSR_CTL(dev_priv->psr.transcoder), val); 1072 } 1073 dev_priv->psr.active = false; 1074 } 1075 1076 static void intel_psr_disable_locked(struct intel_dp *intel_dp) 1077 { 1078 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1079 i915_reg_t psr_status; 1080 u32 psr_status_mask; 1081 1082 lockdep_assert_held(&dev_priv->psr.lock); 1083 1084 if (!dev_priv->psr.enabled) 1085 return; 1086 1087 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n", 1088 dev_priv->psr.psr2_enabled ? "2" : "1"); 1089 1090 intel_psr_exit(dev_priv); 1091 1092 if (dev_priv->psr.psr2_enabled) { 1093 psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder); 1094 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; 1095 } else { 1096 psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder); 1097 psr_status_mask = EDP_PSR_STATUS_STATE_MASK; 1098 } 1099 1100 /* Wait till PSR is idle */ 1101 if (intel_de_wait_for_clear(dev_priv, psr_status, 1102 psr_status_mask, 2000)) 1103 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n"); 1104 1105 /* WA 1408330847 */ 1106 if (dev_priv->psr.psr2_sel_fetch_enabled && 1107 (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || 1108 IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))) 1109 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, 1110 DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0); 1111 1112 /* Disable PSR on Sink */ 1113 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); 1114 1115 if (dev_priv->psr.psr2_enabled) 1116 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0); 1117 1118 dev_priv->psr.enabled = false; 1119 } 1120 1121 /** 1122 * intel_psr_disable - Disable PSR 1123 * @intel_dp: Intel DP 1124 * @old_crtc_state: old CRTC state 1125 * 1126 * This function needs to be called before disabling pipe. 1127 */ 1128 void intel_psr_disable(struct intel_dp *intel_dp, 1129 const struct intel_crtc_state *old_crtc_state) 1130 { 1131 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1132 1133 if (!old_crtc_state->has_psr) 1134 return; 1135 1136 if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv))) 1137 return; 1138 1139 mutex_lock(&dev_priv->psr.lock); 1140 1141 intel_psr_disable_locked(intel_dp); 1142 1143 mutex_unlock(&dev_priv->psr.lock); 1144 cancel_work_sync(&dev_priv->psr.work); 1145 cancel_delayed_work_sync(&dev_priv->psr.dc3co_work); 1146 } 1147 1148 static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) 1149 { 1150 if (IS_TIGERLAKE(dev_priv)) 1151 /* 1152 * Writes to CURSURFLIVE in TGL are causing IOMMU errors and 1153 * visual glitches that are often reproduced when executing 1154 * CPU intensive workloads while a eDP 4K panel is attached. 1155 * 1156 * Manually exiting PSR causes the frontbuffer to be updated 1157 * without glitches and the IOMMU errors are also gone but 1158 * this comes at the cost of less time with PSR active. 1159 * 1160 * So using this workaround until this issue is root caused 1161 * and a better fix is found. 1162 */ 1163 intel_psr_exit(dev_priv); 1164 else if (INTEL_GEN(dev_priv) >= 9) 1165 /* 1166 * Display WA #0884: skl+ 1167 * This documented WA for bxt can be safely applied 1168 * broadly so we can force HW tracking to exit PSR 1169 * instead of disabling and re-enabling. 1170 * Workaround tells us to write 0 to CUR_SURFLIVE_A, 1171 * but it makes more sense write to the current active 1172 * pipe. 1173 */ 1174 intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0); 1175 else 1176 /* 1177 * A write to CURSURFLIVE do not cause HW tracking to exit PSR 1178 * on older gens so doing the manual exit instead. 1179 */ 1180 intel_psr_exit(dev_priv); 1181 } 1182 1183 void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, 1184 const struct intel_crtc_state *crtc_state, 1185 const struct intel_plane_state *plane_state, 1186 int color_plane) 1187 { 1188 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1189 enum pipe pipe = plane->pipe; 1190 u32 val; 1191 1192 if (!crtc_state->enable_psr2_sel_fetch) 1193 return; 1194 1195 val = plane_state ? plane_state->ctl : 0; 1196 val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE; 1197 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val); 1198 if (!val || plane->id == PLANE_CURSOR) 1199 return; 1200 1201 val = plane_state->uapi.dst.y1 << 16 | plane_state->uapi.dst.x1; 1202 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val); 1203 1204 val = plane_state->color_plane[color_plane].y << 16; 1205 val |= plane_state->color_plane[color_plane].x; 1206 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), 1207 val); 1208 1209 /* Sizes are 0 based */ 1210 val = ((drm_rect_height(&plane_state->uapi.src) >> 16) - 1) << 16; 1211 val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; 1212 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); 1213 } 1214 1215 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) 1216 { 1217 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1218 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1219 struct i915_psr *psr = &dev_priv->psr; 1220 1221 if (!HAS_PSR2_SEL_FETCH(dev_priv) || 1222 !crtc_state->enable_psr2_sel_fetch) 1223 return; 1224 1225 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder), 1226 crtc_state->psr2_man_track_ctl); 1227 } 1228 1229 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, 1230 struct drm_rect *clip, bool full_update) 1231 { 1232 u32 val = PSR2_MAN_TRK_CTL_ENABLE; 1233 1234 if (full_update) { 1235 val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; 1236 goto exit; 1237 } 1238 1239 if (clip->y1 == -1) 1240 goto exit; 1241 1242 val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE; 1243 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1); 1244 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(DIV_ROUND_UP(clip->y2, 4) + 1); 1245 exit: 1246 crtc_state->psr2_man_track_ctl = val; 1247 } 1248 1249 static void clip_area_update(struct drm_rect *overlap_damage_area, 1250 struct drm_rect *damage_area) 1251 { 1252 if (overlap_damage_area->y1 == -1) { 1253 overlap_damage_area->y1 = damage_area->y1; 1254 overlap_damage_area->y2 = damage_area->y2; 1255 return; 1256 } 1257 1258 if (damage_area->y1 < overlap_damage_area->y1) 1259 overlap_damage_area->y1 = damage_area->y1; 1260 1261 if (damage_area->y2 > overlap_damage_area->y2) 1262 overlap_damage_area->y2 = damage_area->y2; 1263 } 1264 1265 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, 1266 struct intel_crtc *crtc) 1267 { 1268 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 1269 struct intel_plane_state *new_plane_state, *old_plane_state; 1270 struct drm_rect pipe_clip = { .y1 = -1 }; 1271 struct intel_plane *plane; 1272 bool full_update = false; 1273 int i, ret; 1274 1275 if (!crtc_state->enable_psr2_sel_fetch) 1276 return 0; 1277 1278 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 1279 if (ret) 1280 return ret; 1281 1282 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 1283 new_plane_state, i) { 1284 struct drm_rect temp; 1285 1286 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc) 1287 continue; 1288 1289 /* 1290 * TODO: Not clear how to handle planes with negative position, 1291 * also planes are not updated if they have a negative X 1292 * position so for now doing a full update in this cases 1293 */ 1294 if (new_plane_state->uapi.dst.y1 < 0 || 1295 new_plane_state->uapi.dst.x1 < 0) { 1296 full_update = true; 1297 break; 1298 } 1299 1300 if (!new_plane_state->uapi.visible) 1301 continue; 1302 1303 /* 1304 * For now doing a selective fetch in the whole plane area, 1305 * optimizations will come in the future. 1306 */ 1307 temp.y1 = new_plane_state->uapi.dst.y1; 1308 temp.y2 = new_plane_state->uapi.dst.y2; 1309 clip_area_update(&pipe_clip, &temp); 1310 } 1311 1312 psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update); 1313 return 0; 1314 } 1315 1316 /** 1317 * intel_psr_update - Update PSR state 1318 * @intel_dp: Intel DP 1319 * @crtc_state: new CRTC state 1320 * @conn_state: new CONNECTOR state 1321 * 1322 * This functions will update PSR states, disabling, enabling or switching PSR 1323 * version when executing fastsets. For full modeset, intel_psr_disable() and 1324 * intel_psr_enable() should be called instead. 1325 */ 1326 void intel_psr_update(struct intel_dp *intel_dp, 1327 const struct intel_crtc_state *crtc_state, 1328 const struct drm_connector_state *conn_state) 1329 { 1330 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1331 struct i915_psr *psr = &dev_priv->psr; 1332 bool enable, psr2_enable; 1333 1334 if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp) 1335 return; 1336 1337 dev_priv->psr.force_mode_changed = false; 1338 1339 mutex_lock(&dev_priv->psr.lock); 1340 1341 enable = crtc_state->has_psr; 1342 psr2_enable = crtc_state->has_psr2; 1343 1344 if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) { 1345 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */ 1346 if (crtc_state->crc_enabled && psr->enabled) 1347 psr_force_hw_tracking_exit(dev_priv); 1348 else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) { 1349 /* 1350 * Activate PSR again after a force exit when enabling 1351 * CRC in older gens 1352 */ 1353 if (!dev_priv->psr.active && 1354 !dev_priv->psr.busy_frontbuffer_bits) 1355 schedule_work(&dev_priv->psr.work); 1356 } 1357 1358 goto unlock; 1359 } 1360 1361 if (psr->enabled) 1362 intel_psr_disable_locked(intel_dp); 1363 1364 if (enable) 1365 intel_psr_enable_locked(dev_priv, crtc_state, conn_state); 1366 1367 unlock: 1368 mutex_unlock(&dev_priv->psr.lock); 1369 } 1370 1371 /** 1372 * intel_psr_wait_for_idle - wait for PSR1 to idle 1373 * @new_crtc_state: new CRTC state 1374 * @out_value: PSR status in case of failure 1375 * 1376 * This function is expected to be called from pipe_update_start() where it is 1377 * not expected to race with PSR enable or disable. 1378 * 1379 * Returns: 0 on success or -ETIMEOUT if PSR status does not idle. 1380 */ 1381 int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, 1382 u32 *out_value) 1383 { 1384 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 1385 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1386 1387 if (!dev_priv->psr.enabled || !new_crtc_state->has_psr) 1388 return 0; 1389 1390 /* FIXME: Update this for PSR2 if we need to wait for idle */ 1391 if (READ_ONCE(dev_priv->psr.psr2_enabled)) 1392 return 0; 1393 1394 /* 1395 * From bspec: Panel Self Refresh (BDW+) 1396 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of 1397 * exit training time + 1.5 ms of aux channel handshake. 50 ms is 1398 * defensive enough to cover everything. 1399 */ 1400 1401 return __intel_wait_for_register(&dev_priv->uncore, 1402 EDP_PSR_STATUS(dev_priv->psr.transcoder), 1403 EDP_PSR_STATUS_STATE_MASK, 1404 EDP_PSR_STATUS_STATE_IDLE, 2, 50, 1405 out_value); 1406 } 1407 1408 static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) 1409 { 1410 i915_reg_t reg; 1411 u32 mask; 1412 int err; 1413 1414 if (!dev_priv->psr.enabled) 1415 return false; 1416 1417 if (dev_priv->psr.psr2_enabled) { 1418 reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder); 1419 mask = EDP_PSR2_STATUS_STATE_MASK; 1420 } else { 1421 reg = EDP_PSR_STATUS(dev_priv->psr.transcoder); 1422 mask = EDP_PSR_STATUS_STATE_MASK; 1423 } 1424 1425 mutex_unlock(&dev_priv->psr.lock); 1426 1427 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50); 1428 if (err) 1429 drm_err(&dev_priv->drm, 1430 "Timed out waiting for PSR Idle for re-enable\n"); 1431 1432 /* After the unlocked wait, verify that PSR is still wanted! */ 1433 mutex_lock(&dev_priv->psr.lock); 1434 return err == 0 && dev_priv->psr.enabled; 1435 } 1436 1437 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) 1438 { 1439 struct drm_connector_list_iter conn_iter; 1440 struct drm_device *dev = &dev_priv->drm; 1441 struct drm_modeset_acquire_ctx ctx; 1442 struct drm_atomic_state *state; 1443 struct drm_connector *conn; 1444 int err = 0; 1445 1446 state = drm_atomic_state_alloc(dev); 1447 if (!state) 1448 return -ENOMEM; 1449 1450 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 1451 state->acquire_ctx = &ctx; 1452 1453 retry: 1454 1455 drm_connector_list_iter_begin(dev, &conn_iter); 1456 drm_for_each_connector_iter(conn, &conn_iter) { 1457 struct drm_connector_state *conn_state; 1458 struct drm_crtc_state *crtc_state; 1459 1460 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP) 1461 continue; 1462 1463 conn_state = drm_atomic_get_connector_state(state, conn); 1464 if (IS_ERR(conn_state)) { 1465 err = PTR_ERR(conn_state); 1466 break; 1467 } 1468 1469 if (!conn_state->crtc) 1470 continue; 1471 1472 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc); 1473 if (IS_ERR(crtc_state)) { 1474 err = PTR_ERR(crtc_state); 1475 break; 1476 } 1477 1478 /* Mark mode as changed to trigger a pipe->update() */ 1479 crtc_state->mode_changed = true; 1480 } 1481 drm_connector_list_iter_end(&conn_iter); 1482 1483 if (err == 0) 1484 err = drm_atomic_commit(state); 1485 1486 if (err == -EDEADLK) { 1487 drm_atomic_state_clear(state); 1488 err = drm_modeset_backoff(&ctx); 1489 if (!err) 1490 goto retry; 1491 } 1492 1493 drm_modeset_drop_locks(&ctx); 1494 drm_modeset_acquire_fini(&ctx); 1495 drm_atomic_state_put(state); 1496 1497 return err; 1498 } 1499 1500 int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val) 1501 { 1502 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK; 1503 u32 old_mode; 1504 int ret; 1505 1506 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) || 1507 mode > I915_PSR_DEBUG_FORCE_PSR1) { 1508 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val); 1509 return -EINVAL; 1510 } 1511 1512 ret = mutex_lock_interruptible(&dev_priv->psr.lock); 1513 if (ret) 1514 return ret; 1515 1516 old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK; 1517 dev_priv->psr.debug = val; 1518 1519 /* 1520 * Do it right away if it's already enabled, otherwise it will be done 1521 * when enabling the source. 1522 */ 1523 if (dev_priv->psr.enabled) 1524 psr_irq_control(dev_priv); 1525 1526 mutex_unlock(&dev_priv->psr.lock); 1527 1528 if (old_mode != mode) 1529 ret = intel_psr_fastset_force(dev_priv); 1530 1531 return ret; 1532 } 1533 1534 static void intel_psr_handle_irq(struct drm_i915_private *dev_priv) 1535 { 1536 struct i915_psr *psr = &dev_priv->psr; 1537 1538 intel_psr_disable_locked(psr->dp); 1539 psr->sink_not_reliable = true; 1540 /* let's make sure that sink is awaken */ 1541 drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0); 1542 } 1543 1544 static void intel_psr_work(struct work_struct *work) 1545 { 1546 struct drm_i915_private *dev_priv = 1547 container_of(work, typeof(*dev_priv), psr.work); 1548 1549 mutex_lock(&dev_priv->psr.lock); 1550 1551 if (!dev_priv->psr.enabled) 1552 goto unlock; 1553 1554 if (READ_ONCE(dev_priv->psr.irq_aux_error)) 1555 intel_psr_handle_irq(dev_priv); 1556 1557 /* 1558 * We have to make sure PSR is ready for re-enable 1559 * otherwise it keeps disabled until next full enable/disable cycle. 1560 * PSR might take some time to get fully disabled 1561 * and be ready for re-enable. 1562 */ 1563 if (!__psr_wait_for_idle_locked(dev_priv)) 1564 goto unlock; 1565 1566 /* 1567 * The delayed work can race with an invalidate hence we need to 1568 * recheck. Since psr_flush first clears this and then reschedules we 1569 * won't ever miss a flush when bailing out here. 1570 */ 1571 if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active) 1572 goto unlock; 1573 1574 intel_psr_activate(dev_priv->psr.dp); 1575 unlock: 1576 mutex_unlock(&dev_priv->psr.lock); 1577 } 1578 1579 /** 1580 * intel_psr_invalidate - Invalidade PSR 1581 * @dev_priv: i915 device 1582 * @frontbuffer_bits: frontbuffer plane tracking bits 1583 * @origin: which operation caused the invalidate 1584 * 1585 * Since the hardware frontbuffer tracking has gaps we need to integrate 1586 * with the software frontbuffer tracking. This function gets called every 1587 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be 1588 * disabled if the frontbuffer mask contains a buffer relevant to PSR. 1589 * 1590 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits." 1591 */ 1592 void intel_psr_invalidate(struct drm_i915_private *dev_priv, 1593 unsigned frontbuffer_bits, enum fb_op_origin origin) 1594 { 1595 if (!CAN_PSR(dev_priv)) 1596 return; 1597 1598 if (origin == ORIGIN_FLIP) 1599 return; 1600 1601 mutex_lock(&dev_priv->psr.lock); 1602 if (!dev_priv->psr.enabled) { 1603 mutex_unlock(&dev_priv->psr.lock); 1604 return; 1605 } 1606 1607 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe); 1608 dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; 1609 1610 if (frontbuffer_bits) 1611 intel_psr_exit(dev_priv); 1612 1613 mutex_unlock(&dev_priv->psr.lock); 1614 } 1615 1616 /* 1617 * When we will be completely rely on PSR2 S/W tracking in future, 1618 * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP 1619 * event also therefore tgl_dc3co_flush() require to be changed 1620 * accordingly in future. 1621 */ 1622 static void 1623 tgl_dc3co_flush(struct drm_i915_private *dev_priv, 1624 unsigned int frontbuffer_bits, enum fb_op_origin origin) 1625 { 1626 mutex_lock(&dev_priv->psr.lock); 1627 1628 if (!dev_priv->psr.dc3co_enabled) 1629 goto unlock; 1630 1631 if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active) 1632 goto unlock; 1633 1634 /* 1635 * At every frontbuffer flush flip event modified delay of delayed work, 1636 * when delayed work schedules that means display has been idle. 1637 */ 1638 if (!(frontbuffer_bits & 1639 INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe))) 1640 goto unlock; 1641 1642 tgl_psr2_enable_dc3co(dev_priv); 1643 mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work, 1644 dev_priv->psr.dc3co_exit_delay); 1645 1646 unlock: 1647 mutex_unlock(&dev_priv->psr.lock); 1648 } 1649 1650 /** 1651 * intel_psr_flush - Flush PSR 1652 * @dev_priv: i915 device 1653 * @frontbuffer_bits: frontbuffer plane tracking bits 1654 * @origin: which operation caused the flush 1655 * 1656 * Since the hardware frontbuffer tracking has gaps we need to integrate 1657 * with the software frontbuffer tracking. This function gets called every 1658 * time frontbuffer rendering has completed and flushed out to memory. PSR 1659 * can be enabled again if no other frontbuffer relevant to PSR is dirty. 1660 * 1661 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. 1662 */ 1663 void intel_psr_flush(struct drm_i915_private *dev_priv, 1664 unsigned frontbuffer_bits, enum fb_op_origin origin) 1665 { 1666 if (!CAN_PSR(dev_priv)) 1667 return; 1668 1669 if (origin == ORIGIN_FLIP) { 1670 tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin); 1671 return; 1672 } 1673 1674 mutex_lock(&dev_priv->psr.lock); 1675 if (!dev_priv->psr.enabled) { 1676 mutex_unlock(&dev_priv->psr.lock); 1677 return; 1678 } 1679 1680 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe); 1681 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; 1682 1683 /* By definition flush = invalidate + flush */ 1684 if (frontbuffer_bits) 1685 psr_force_hw_tracking_exit(dev_priv); 1686 1687 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) 1688 schedule_work(&dev_priv->psr.work); 1689 mutex_unlock(&dev_priv->psr.lock); 1690 } 1691 1692 /** 1693 * intel_psr_init - Init basic PSR work and mutex. 1694 * @dev_priv: i915 device private 1695 * 1696 * This function is called only once at driver load to initialize basic 1697 * PSR stuff. 1698 */ 1699 void intel_psr_init(struct drm_i915_private *dev_priv) 1700 { 1701 if (!HAS_PSR(dev_priv)) 1702 return; 1703 1704 if (!dev_priv->psr.sink_support) 1705 return; 1706 1707 if (IS_HASWELL(dev_priv)) 1708 /* 1709 * HSW don't have PSR registers on the same space as transcoder 1710 * so set this to a value that when subtract to the register 1711 * in transcoder space results in the right offset for HSW 1712 */ 1713 dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE; 1714 1715 if (dev_priv->params.enable_psr == -1) 1716 if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable) 1717 dev_priv->params.enable_psr = 0; 1718 1719 /* Set link_standby x link_off defaults */ 1720 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 1721 /* HSW and BDW require workarounds that we don't implement. */ 1722 dev_priv->psr.link_standby = false; 1723 else if (INTEL_GEN(dev_priv) < 12) 1724 /* For new platforms up to TGL let's respect VBT back again */ 1725 dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; 1726 1727 INIT_WORK(&dev_priv->psr.work, intel_psr_work); 1728 INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work); 1729 mutex_init(&dev_priv->psr.lock); 1730 } 1731 1732 static int psr_get_status_and_error_status(struct intel_dp *intel_dp, 1733 u8 *status, u8 *error_status) 1734 { 1735 struct drm_dp_aux *aux = &intel_dp->aux; 1736 int ret; 1737 1738 ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status); 1739 if (ret != 1) 1740 return ret; 1741 1742 ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status); 1743 if (ret != 1) 1744 return ret; 1745 1746 *status = *status & DP_PSR_SINK_STATE_MASK; 1747 1748 return 0; 1749 } 1750 1751 static void psr_alpm_check(struct intel_dp *intel_dp) 1752 { 1753 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1754 struct drm_dp_aux *aux = &intel_dp->aux; 1755 struct i915_psr *psr = &dev_priv->psr; 1756 u8 val; 1757 int r; 1758 1759 if (!psr->psr2_enabled) 1760 return; 1761 1762 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val); 1763 if (r != 1) { 1764 drm_err(&dev_priv->drm, "Error reading ALPM status\n"); 1765 return; 1766 } 1767 1768 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) { 1769 intel_psr_disable_locked(intel_dp); 1770 psr->sink_not_reliable = true; 1771 drm_dbg_kms(&dev_priv->drm, 1772 "ALPM lock timeout error, disabling PSR\n"); 1773 1774 /* Clearing error */ 1775 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val); 1776 } 1777 } 1778 1779 static void psr_capability_changed_check(struct intel_dp *intel_dp) 1780 { 1781 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1782 struct i915_psr *psr = &dev_priv->psr; 1783 u8 val; 1784 int r; 1785 1786 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val); 1787 if (r != 1) { 1788 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n"); 1789 return; 1790 } 1791 1792 if (val & DP_PSR_CAPS_CHANGE) { 1793 intel_psr_disable_locked(intel_dp); 1794 psr->sink_not_reliable = true; 1795 drm_dbg_kms(&dev_priv->drm, 1796 "Sink PSR capability changed, disabling PSR\n"); 1797 1798 /* Clearing it */ 1799 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val); 1800 } 1801 } 1802 1803 void intel_psr_short_pulse(struct intel_dp *intel_dp) 1804 { 1805 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1806 struct i915_psr *psr = &dev_priv->psr; 1807 u8 status, error_status; 1808 const u8 errors = DP_PSR_RFB_STORAGE_ERROR | 1809 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | 1810 DP_PSR_LINK_CRC_ERROR; 1811 1812 if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) 1813 return; 1814 1815 mutex_lock(&psr->lock); 1816 1817 if (!psr->enabled || psr->dp != intel_dp) 1818 goto exit; 1819 1820 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) { 1821 drm_err(&dev_priv->drm, 1822 "Error reading PSR status or error status\n"); 1823 goto exit; 1824 } 1825 1826 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) { 1827 intel_psr_disable_locked(intel_dp); 1828 psr->sink_not_reliable = true; 1829 } 1830 1831 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status) 1832 drm_dbg_kms(&dev_priv->drm, 1833 "PSR sink internal error, disabling PSR\n"); 1834 if (error_status & DP_PSR_RFB_STORAGE_ERROR) 1835 drm_dbg_kms(&dev_priv->drm, 1836 "PSR RFB storage error, disabling PSR\n"); 1837 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) 1838 drm_dbg_kms(&dev_priv->drm, 1839 "PSR VSC SDP uncorrectable error, disabling PSR\n"); 1840 if (error_status & DP_PSR_LINK_CRC_ERROR) 1841 drm_dbg_kms(&dev_priv->drm, 1842 "PSR Link CRC error, disabling PSR\n"); 1843 1844 if (error_status & ~errors) 1845 drm_err(&dev_priv->drm, 1846 "PSR_ERROR_STATUS unhandled errors %x\n", 1847 error_status & ~errors); 1848 /* clear status register */ 1849 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status); 1850 1851 psr_alpm_check(intel_dp); 1852 psr_capability_changed_check(intel_dp); 1853 1854 exit: 1855 mutex_unlock(&psr->lock); 1856 } 1857 1858 bool intel_psr_enabled(struct intel_dp *intel_dp) 1859 { 1860 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1861 bool ret; 1862 1863 if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) 1864 return false; 1865 1866 mutex_lock(&dev_priv->psr.lock); 1867 ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled); 1868 mutex_unlock(&dev_priv->psr.lock); 1869 1870 return ret; 1871 } 1872 1873 void intel_psr_atomic_check(struct drm_connector *connector, 1874 struct drm_connector_state *old_state, 1875 struct drm_connector_state *new_state) 1876 { 1877 struct drm_i915_private *dev_priv = to_i915(connector->dev); 1878 struct intel_connector *intel_connector; 1879 struct intel_digital_port *dig_port; 1880 struct drm_crtc_state *crtc_state; 1881 1882 if (!CAN_PSR(dev_priv) || !new_state->crtc || 1883 !dev_priv->psr.force_mode_changed) 1884 return; 1885 1886 intel_connector = to_intel_connector(connector); 1887 dig_port = enc_to_dig_port(to_intel_encoder(new_state->best_encoder)); 1888 if (dev_priv->psr.dp != &dig_port->dp) 1889 return; 1890 1891 crtc_state = drm_atomic_get_new_crtc_state(new_state->state, 1892 new_state->crtc); 1893 crtc_state->mode_changed = true; 1894 } 1895 1896 void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp) 1897 { 1898 struct drm_i915_private *dev_priv; 1899 1900 if (!intel_dp) 1901 return; 1902 1903 dev_priv = dp_to_i915(intel_dp); 1904 if (!CAN_PSR(dev_priv) || intel_dp != dev_priv->psr.dp) 1905 return; 1906 1907 dev_priv->psr.force_mode_changed = true; 1908 } 1909