/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "skl_universal_plane.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
 * when the system is idle but the display is on, as it eliminates display
 * refresh requests to DDR memory completely as long as the frame buffer for
 * that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was),
 * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
 * frames. If no other flip occurs and the work runs, DC3CO is disabled and
 * PSR2 is configured to enter deep sleep again, resetting in case of another
 * flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most modern systems will only use page
 * flips.
 */

static bool psr_global_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915->params.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return true;
	}
}

static void psr_irq_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
	u32 mask, val;

	/*
	 * gen12+ has registers relative to transcoder and one per transcoder
	 * using the same bit definition: handle it as TRANSCODER_EDP to force
	 * 0 shift in bit definition
	 */
	if (DISPLAY_VER(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
	} else {
		trans_shift = intel_dp->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	mask = EDP_PSR_ERROR(trans_shift);
	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= EDP_PSR_POST_EXIT(trans_shift) |
			EDP_PSR_PRE_ENTRY(trans_shift);

	/* Warning: it is masking/setting reserved bits too */
	val = intel_de_read(dev_priv, imr_reg);
	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
	val |= ~mask;
	intel_de_write(dev_priv, imr_reg, val);
}

static void psr_event_print(struct drm_i915_private *i915,
			    u32 val, bool psr2_enabled)
{
	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		drm_dbg_kms(&i915->drm, "\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n"); 172 if (val & PSR_EVENT_VBI_ENABLE) 173 drm_dbg_kms(&i915->drm, "\tVBI enabled\n"); 174 if (val & PSR_EVENT_LPSP_MODE_EXIT) 175 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n"); 176 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled) 177 drm_dbg_kms(&i915->drm, "\tPSR disabled\n"); 178 } 179 180 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) 181 { 182 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 183 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 184 ktime_t time_ns = ktime_get(); 185 enum transcoder trans_shift; 186 i915_reg_t imr_reg; 187 188 if (DISPLAY_VER(dev_priv) >= 12) { 189 trans_shift = 0; 190 imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder); 191 } else { 192 trans_shift = intel_dp->psr.transcoder; 193 imr_reg = EDP_PSR_IMR; 194 } 195 196 if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) { 197 intel_dp->psr.last_entry_attempt = time_ns; 198 drm_dbg_kms(&dev_priv->drm, 199 "[transcoder %s] PSR entry attempt in 2 vblanks\n", 200 transcoder_name(cpu_transcoder)); 201 } 202 203 if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) { 204 intel_dp->psr.last_exit = time_ns; 205 drm_dbg_kms(&dev_priv->drm, 206 "[transcoder %s] PSR exit completed\n", 207 transcoder_name(cpu_transcoder)); 208 209 if (DISPLAY_VER(dev_priv) >= 9) { 210 u32 val = intel_de_read(dev_priv, 211 PSR_EVENT(cpu_transcoder)); 212 bool psr2_enabled = intel_dp->psr.psr2_enabled; 213 214 intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder), 215 val); 216 psr_event_print(dev_priv, val, psr2_enabled); 217 } 218 } 219 220 if (psr_iir & EDP_PSR_ERROR(trans_shift)) { 221 u32 val; 222 223 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n", 224 transcoder_name(cpu_transcoder)); 225 226 intel_dp->psr.irq_aux_error = true; 227 228 /* 229 * If this interruption is not masked it will keep 230 * interrupting so fast that it prevents the scheduled 231 * work to run. 232 * Also after a PSR error, we don't want to arm PSR 233 * again so we don't care about unmask the interruption 234 * or unset irq_aux_error. 
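		 *
		 * The work scheduled below will then disable PSR and mark the
		 * sink as not reliable, see intel_psr_handle_irq().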
235 */ 236 val = intel_de_read(dev_priv, imr_reg); 237 val |= EDP_PSR_ERROR(trans_shift); 238 intel_de_write(dev_priv, imr_reg, val); 239 240 schedule_work(&intel_dp->psr.work); 241 } 242 } 243 244 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp) 245 { 246 u8 alpm_caps = 0; 247 248 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, 249 &alpm_caps) != 1) 250 return false; 251 return alpm_caps & DP_ALPM_CAP; 252 } 253 254 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp) 255 { 256 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 257 u8 val = 8; /* assume the worst if we can't read the value */ 258 259 if (drm_dp_dpcd_readb(&intel_dp->aux, 260 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1) 261 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK; 262 else 263 drm_dbg_kms(&i915->drm, 264 "Unable to get sink synchronization latency, assuming 8 frames\n"); 265 return val; 266 } 267 268 static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp) 269 { 270 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 271 u16 val; 272 ssize_t r; 273 274 /* 275 * Returning the default X granularity if granularity not required or 276 * if DPCD read fails 277 */ 278 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) 279 return 4; 280 281 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2); 282 if (r != 2) 283 drm_dbg_kms(&i915->drm, 284 "Unable to read DP_PSR2_SU_X_GRANULARITY\n"); 285 286 /* 287 * Spec says that if the value read is 0 the default granularity should 288 * be used instead. 289 */ 290 if (r != 2 || val == 0) 291 val = 4; 292 293 return val; 294 } 295 296 void intel_psr_init_dpcd(struct intel_dp *intel_dp) 297 { 298 struct drm_i915_private *dev_priv = 299 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 300 301 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, 302 sizeof(intel_dp->psr_dpcd)); 303 304 if (!intel_dp->psr_dpcd[0]) 305 return; 306 drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n", 307 intel_dp->psr_dpcd[0]); 308 309 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { 310 drm_dbg_kms(&dev_priv->drm, 311 "PSR support not currently available for this panel\n"); 312 return; 313 } 314 315 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { 316 drm_dbg_kms(&dev_priv->drm, 317 "Panel lacks power state control, PSR cannot be enabled\n"); 318 return; 319 } 320 321 intel_dp->psr.sink_support = true; 322 intel_dp->psr.sink_sync_latency = 323 intel_dp_get_sink_sync_latency(intel_dp); 324 325 if (DISPLAY_VER(dev_priv) >= 9 && 326 (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) { 327 bool y_req = intel_dp->psr_dpcd[1] & 328 DP_PSR2_SU_Y_COORDINATE_REQUIRED; 329 bool alpm = intel_dp_get_alpm_status(intel_dp); 330 331 /* 332 * All panels that supports PSR version 03h (PSR2 + 333 * Y-coordinate) can handle Y-coordinates in VSC but we are 334 * only sure that it is going to be used when required by the 335 * panel. This way panel is capable to do selective update 336 * without a aux frame sync. 337 * 338 * To support PSR version 02h and PSR version 03h without 339 * Y-coordinate requirement panels we would need to enable 340 * GTC first. 341 */ 342 intel_dp->psr.sink_psr2_support = y_req && alpm; 343 drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n", 344 intel_dp->psr.sink_psr2_support ? 
"" : "not "); 345 346 if (intel_dp->psr.sink_psr2_support) { 347 intel_dp->psr.colorimetry_support = 348 intel_dp_get_colorimetry_status(intel_dp); 349 intel_dp->psr.su_x_granularity = 350 intel_dp_get_su_x_granulartiy(intel_dp); 351 } 352 } 353 } 354 355 static void hsw_psr_setup_aux(struct intel_dp *intel_dp) 356 { 357 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 358 u32 aux_clock_divider, aux_ctl; 359 int i; 360 static const u8 aux_msg[] = { 361 [0] = DP_AUX_NATIVE_WRITE << 4, 362 [1] = DP_SET_POWER >> 8, 363 [2] = DP_SET_POWER & 0xff, 364 [3] = 1 - 1, 365 [4] = DP_SET_POWER_D0, 366 }; 367 u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK | 368 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK | 369 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK | 370 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK; 371 372 BUILD_BUG_ON(sizeof(aux_msg) > 20); 373 for (i = 0; i < sizeof(aux_msg); i += 4) 374 intel_de_write(dev_priv, 375 EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2), 376 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); 377 378 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); 379 380 /* Start with bits set for DDI_AUX_CTL register */ 381 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), 382 aux_clock_divider); 383 384 /* Select only valid bits for SRD_AUX_CTL */ 385 aux_ctl &= psr_aux_mask; 386 intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder), 387 aux_ctl); 388 } 389 390 static void intel_psr_enable_sink(struct intel_dp *intel_dp) 391 { 392 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 393 u8 dpcd_val = DP_PSR_ENABLE; 394 395 /* Enable ALPM at sink for psr2 */ 396 if (intel_dp->psr.psr2_enabled) { 397 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 398 DP_ALPM_ENABLE | 399 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); 400 401 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; 402 } else { 403 if (intel_dp->psr.link_standby) 404 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; 405 406 if (DISPLAY_VER(dev_priv) >= 8) 407 dpcd_val |= DP_PSR_CRC_VERIFICATION; 408 } 409 410 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); 411 412 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); 413 } 414 415 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) 416 { 417 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 418 u32 val = 0; 419 420 if (DISPLAY_VER(dev_priv) >= 11) 421 val |= EDP_PSR_TP4_TIME_0US; 422 423 if (dev_priv->params.psr_safest_params) { 424 val |= EDP_PSR_TP1_TIME_2500us; 425 val |= EDP_PSR_TP2_TP3_TIME_2500us; 426 goto check_tp3_sel; 427 } 428 429 if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0) 430 val |= EDP_PSR_TP1_TIME_0us; 431 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100) 432 val |= EDP_PSR_TP1_TIME_100us; 433 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500) 434 val |= EDP_PSR_TP1_TIME_500us; 435 else 436 val |= EDP_PSR_TP1_TIME_2500us; 437 438 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0) 439 val |= EDP_PSR_TP2_TP3_TIME_0us; 440 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100) 441 val |= EDP_PSR_TP2_TP3_TIME_100us; 442 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500) 443 val |= EDP_PSR_TP2_TP3_TIME_500us; 444 else 445 val |= EDP_PSR_TP2_TP3_TIME_2500us; 446 447 check_tp3_sel: 448 if (intel_dp_source_supports_hbr2(intel_dp) && 449 drm_dp_tps3_supported(intel_dp->dpcd)) 450 val |= EDP_PSR_TP1_TP3_SEL; 451 else 452 val |= EDP_PSR_TP1_TP2_SEL; 453 454 return val; 455 } 456 457 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) 458 { 459 struct 
	drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int idle_frames;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);

	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
		idle_frames = 0xf;

	return idle_frames;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
}

static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (dev_priv->params.psr_safest_params)
		return EDP_PSR2_TP2_TIME_2500us;

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	return val;
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
	val |= intel_psr2_get_tp_time(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 12) {
		/*
		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the
		 * default values from BSpec. To get optimal power
		 * consumption, modes below 4k resolution need to decrease
		 * IO_BUFFER_WAKE and FAST_WAKE, and modes above 4k resolution
		 * need to increase IO_BUFFER_WAKE and FAST_WAKE.
541 */ 542 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; 543 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7); 544 val |= TGL_EDP_PSR2_FAST_WAKE(7); 545 } else if (DISPLAY_VER(dev_priv) >= 9) { 546 val |= EDP_PSR2_IO_BUFFER_WAKE(7); 547 val |= EDP_PSR2_FAST_WAKE(7); 548 } 549 550 if (intel_dp->psr.psr2_sel_fetch_enabled) { 551 /* WA 1408330847 */ 552 if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) || 553 IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)) 554 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, 555 DIS_RAM_BYPASS_PSR2_MAN_TRACK, 556 DIS_RAM_BYPASS_PSR2_MAN_TRACK); 557 558 intel_de_write(dev_priv, 559 PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 560 PSR2_MAN_TRK_CTL_ENABLE); 561 } else if (HAS_PSR2_SEL_FETCH(dev_priv)) { 562 intel_de_write(dev_priv, 563 PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0); 564 } 565 566 /* 567 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is 568 * recommending keep this bit unset while PSR2 is enabled. 569 */ 570 intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0); 571 572 intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val); 573 } 574 575 static bool 576 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) 577 { 578 if (DISPLAY_VER(dev_priv) < 9) 579 return false; 580 else if (DISPLAY_VER(dev_priv) >= 12) 581 return trans == TRANSCODER_A; 582 else 583 return trans == TRANSCODER_EDP; 584 } 585 586 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate) 587 { 588 if (!cstate || !cstate->hw.active) 589 return 0; 590 591 return DIV_ROUND_UP(1000 * 1000, 592 drm_mode_vrefresh(&cstate->hw.adjusted_mode)); 593 } 594 595 static void psr2_program_idle_frames(struct intel_dp *intel_dp, 596 u32 idle_frames) 597 { 598 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 599 u32 val; 600 601 idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT; 602 val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder)); 603 val &= ~EDP_PSR2_IDLE_FRAME_MASK; 604 val |= idle_frames; 605 intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val); 606 } 607 608 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp) 609 { 610 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 611 612 psr2_program_idle_frames(intel_dp, 0); 613 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO); 614 } 615 616 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp) 617 { 618 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 619 620 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 621 psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp)); 622 } 623 624 static void tgl_dc3co_disable_work(struct work_struct *work) 625 { 626 struct intel_dp *intel_dp = 627 container_of(work, typeof(*intel_dp), psr.dc3co_work.work); 628 629 mutex_lock(&intel_dp->psr.lock); 630 /* If delayed work is pending, it is not idle */ 631 if (delayed_work_pending(&intel_dp->psr.dc3co_work)) 632 goto unlock; 633 634 tgl_psr2_disable_dc3co(intel_dp); 635 unlock: 636 mutex_unlock(&intel_dp->psr.lock); 637 } 638 639 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp) 640 { 641 if (!intel_dp->psr.dc3co_enabled) 642 return; 643 644 cancel_delayed_work(&intel_dp->psr.dc3co_work); 645 /* Before PSR2 exit disallow dc3co*/ 646 tgl_psr2_disable_dc3co(intel_dp); 647 } 648 649 static void 650 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, 651 struct intel_crtc_state *crtc_state) 652 { 653 const u32 crtc_vdisplay = 
crtc_state->uapi.adjusted_mode.crtc_vdisplay; 654 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 655 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 656 u32 exit_scanlines; 657 658 /* 659 * FIXME: Due to the changed sequence of activating/deactivating DC3CO, 660 * disable DC3CO until the changed dc3co activating/deactivating sequence 661 * is applied. B.Specs:49196 662 */ 663 return; 664 665 /* 666 * DMC's DC3CO exit mechanism has an issue with Selective Fecth 667 * TODO: when the issue is addressed, this restriction should be removed. 668 */ 669 if (crtc_state->enable_psr2_sel_fetch) 670 return; 671 672 if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO)) 673 return; 674 675 /* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/ 676 if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A || 677 dig_port->base.port != PORT_A) 678 return; 679 680 /* 681 * DC3CO Exit time 200us B.Spec 49196 682 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1 683 */ 684 exit_scanlines = 685 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1; 686 687 if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay)) 688 return; 689 690 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines; 691 } 692 693 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, 694 struct intel_crtc_state *crtc_state) 695 { 696 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); 697 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 698 struct intel_plane_state *plane_state; 699 struct intel_plane *plane; 700 int i; 701 702 if (!dev_priv->params.enable_psr2_sel_fetch && 703 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) { 704 drm_dbg_kms(&dev_priv->drm, 705 "PSR2 sel fetch not enabled, disabled by parameter\n"); 706 return false; 707 } 708 709 if (crtc_state->uapi.async_flip) { 710 drm_dbg_kms(&dev_priv->drm, 711 "PSR2 sel fetch not enabled, async flip enabled\n"); 712 return false; 713 } 714 715 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 716 if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) { 717 drm_dbg_kms(&dev_priv->drm, 718 "PSR2 sel fetch not enabled, plane rotated\n"); 719 return false; 720 } 721 } 722 723 /* Wa_14010254185 Wa_14010103792 */ 724 if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1)) { 725 drm_dbg_kms(&dev_priv->drm, 726 "PSR2 sel fetch not enabled, missing the implementation of WAs\n"); 727 return false; 728 } 729 730 return crtc_state->enable_psr2_sel_fetch = true; 731 } 732 733 static bool intel_psr2_config_valid(struct intel_dp *intel_dp, 734 struct intel_crtc_state *crtc_state) 735 { 736 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 737 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; 738 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; 739 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0; 740 741 if (!intel_dp->psr.sink_psr2_support) 742 return false; 743 744 /* JSL and EHL only supports eDP 1.3 */ 745 if (IS_JSL_EHL(dev_priv)) { 746 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n"); 747 return false; 748 } 749 750 /* Wa_16011181250 */ 751 if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv)) { 752 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n"); 753 return false; 754 } 755 756 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) { 757 drm_dbg_kms(&dev_priv->drm, 758 "PSR2 not supported in transcoder %s\n", 759 transcoder_name(crtc_state->cpu_transcoder)); 
760 return false; 761 } 762 763 if (!psr2_global_enabled(intel_dp)) { 764 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n"); 765 return false; 766 } 767 768 /* 769 * DSC and PSR2 cannot be enabled simultaneously. If a requested 770 * resolution requires DSC to be enabled, priority is given to DSC 771 * over PSR2. 772 */ 773 if (crtc_state->dsc.compression_enable) { 774 drm_dbg_kms(&dev_priv->drm, 775 "PSR2 cannot be enabled since DSC is enabled\n"); 776 return false; 777 } 778 779 if (crtc_state->crc_enabled) { 780 drm_dbg_kms(&dev_priv->drm, 781 "PSR2 not enabled because it would inhibit pipe CRC calculation\n"); 782 return false; 783 } 784 785 if (DISPLAY_VER(dev_priv) >= 12) { 786 psr_max_h = 5120; 787 psr_max_v = 3200; 788 max_bpp = 30; 789 } else if (DISPLAY_VER(dev_priv) >= 10) { 790 psr_max_h = 4096; 791 psr_max_v = 2304; 792 max_bpp = 24; 793 } else if (DISPLAY_VER(dev_priv) == 9) { 794 psr_max_h = 3640; 795 psr_max_v = 2304; 796 max_bpp = 24; 797 } 798 799 if (crtc_state->pipe_bpp > max_bpp) { 800 drm_dbg_kms(&dev_priv->drm, 801 "PSR2 not enabled, pipe bpp %d > max supported %d\n", 802 crtc_state->pipe_bpp, max_bpp); 803 return false; 804 } 805 806 /* 807 * HW sends SU blocks of size four scan lines, which means the starting 808 * X coordinate and Y granularity requirements will always be met. We 809 * only need to validate the SU block width is a multiple of 810 * x granularity. 811 */ 812 if (crtc_hdisplay % intel_dp->psr.su_x_granularity) { 813 drm_dbg_kms(&dev_priv->drm, 814 "PSR2 not enabled, hdisplay(%d) not multiple of %d\n", 815 crtc_hdisplay, intel_dp->psr.su_x_granularity); 816 return false; 817 } 818 819 if (HAS_PSR2_SEL_FETCH(dev_priv)) { 820 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && 821 !HAS_PSR_HW_TRACKING(dev_priv)) { 822 drm_dbg_kms(&dev_priv->drm, 823 "PSR2 not enabled, selective fetch not valid and no HW tracking available\n"); 824 return false; 825 } 826 } 827 828 /* Wa_2209313811 */ 829 if (!crtc_state->enable_psr2_sel_fetch && 830 IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1)) { 831 drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n"); 832 return false; 833 } 834 835 if (!crtc_state->enable_psr2_sel_fetch && 836 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) { 837 drm_dbg_kms(&dev_priv->drm, 838 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", 839 crtc_hdisplay, crtc_vdisplay, 840 psr_max_h, psr_max_v); 841 return false; 842 } 843 844 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state); 845 return true; 846 } 847 848 void intel_psr_compute_config(struct intel_dp *intel_dp, 849 struct intel_crtc_state *crtc_state) 850 { 851 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 852 const struct drm_display_mode *adjusted_mode = 853 &crtc_state->hw.adjusted_mode; 854 int psr_setup_time; 855 856 /* 857 * Current PSR panels dont work reliably with VRR enabled 858 * So if VRR is enabled, do not enable PSR. 
859 */ 860 if (crtc_state->vrr.enable) 861 return; 862 863 if (!CAN_PSR(intel_dp)) 864 return; 865 866 if (!psr_global_enabled(intel_dp)) { 867 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n"); 868 return; 869 } 870 871 if (intel_dp->psr.sink_not_reliable) { 872 drm_dbg_kms(&dev_priv->drm, 873 "PSR sink implementation is not reliable\n"); 874 return; 875 } 876 877 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 878 drm_dbg_kms(&dev_priv->drm, 879 "PSR condition failed: Interlaced mode enabled\n"); 880 return; 881 } 882 883 psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); 884 if (psr_setup_time < 0) { 885 drm_dbg_kms(&dev_priv->drm, 886 "PSR condition failed: Invalid PSR setup time (0x%02x)\n", 887 intel_dp->psr_dpcd[1]); 888 return; 889 } 890 891 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > 892 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { 893 drm_dbg_kms(&dev_priv->drm, 894 "PSR condition failed: PSR setup time (%d us) too long\n", 895 psr_setup_time); 896 return; 897 } 898 899 crtc_state->has_psr = true; 900 crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); 901 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 902 } 903 904 void intel_psr_get_config(struct intel_encoder *encoder, 905 struct intel_crtc_state *pipe_config) 906 { 907 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 908 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 909 struct intel_dp *intel_dp; 910 u32 val; 911 912 if (!dig_port) 913 return; 914 915 intel_dp = &dig_port->dp; 916 if (!CAN_PSR(intel_dp)) 917 return; 918 919 mutex_lock(&intel_dp->psr.lock); 920 if (!intel_dp->psr.enabled) 921 goto unlock; 922 923 /* 924 * Not possible to read EDP_PSR/PSR2_CTL registers as it is 925 * enabled/disabled because of frontbuffer tracking and others. 
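	 *
	 * Report the software state tracked in intel_dp->psr (protected by
	 * psr.lock) instead.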
926 */ 927 pipe_config->has_psr = true; 928 pipe_config->has_psr2 = intel_dp->psr.psr2_enabled; 929 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 930 931 if (!intel_dp->psr.psr2_enabled) 932 goto unlock; 933 934 if (HAS_PSR2_SEL_FETCH(dev_priv)) { 935 val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder)); 936 if (val & PSR2_MAN_TRK_CTL_ENABLE) 937 pipe_config->enable_psr2_sel_fetch = true; 938 } 939 940 if (DISPLAY_VER(dev_priv) >= 12) { 941 val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder)); 942 val &= EXITLINE_MASK; 943 pipe_config->dc3co_exitline = val; 944 } 945 unlock: 946 mutex_unlock(&intel_dp->psr.lock); 947 } 948 949 static void intel_psr_activate(struct intel_dp *intel_dp) 950 { 951 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 952 enum transcoder transcoder = intel_dp->psr.transcoder; 953 954 if (transcoder_has_psr2(dev_priv, transcoder)) 955 drm_WARN_ON(&dev_priv->drm, 956 intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE); 957 958 drm_WARN_ON(&dev_priv->drm, 959 intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE); 960 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active); 961 lockdep_assert_held(&intel_dp->psr.lock); 962 963 /* psr1 and psr2 are mutually exclusive.*/ 964 if (intel_dp->psr.psr2_enabled) 965 hsw_activate_psr2(intel_dp); 966 else 967 hsw_activate_psr1(intel_dp); 968 969 intel_dp->psr.active = true; 970 } 971 972 static void intel_psr_enable_source(struct intel_dp *intel_dp, 973 const struct intel_crtc_state *crtc_state) 974 { 975 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 976 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 977 u32 mask; 978 979 /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+ 980 * use hardcoded values PSR AUX transactions 981 */ 982 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 983 hsw_psr_setup_aux(intel_dp); 984 985 if (intel_dp->psr.psr2_enabled && DISPLAY_VER(dev_priv) == 9) { 986 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); 987 u32 chicken = intel_de_read(dev_priv, reg); 988 989 chicken |= PSR2_VSC_ENABLE_PROG_HEADER | 990 PSR2_ADD_VERTICAL_LINE_COUNT; 991 intel_de_write(dev_priv, reg, chicken); 992 } 993 994 /* 995 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also 996 * mask LPSP to avoid dependency on other drivers that might block 997 * runtime_pm besides preventing other hw tracking issues now we 998 * can rely on frontbuffer tracking. 999 */ 1000 mask = EDP_PSR_DEBUG_MASK_MEMUP | 1001 EDP_PSR_DEBUG_MASK_HPD | 1002 EDP_PSR_DEBUG_MASK_LPSP | 1003 EDP_PSR_DEBUG_MASK_MAX_SLEEP; 1004 1005 if (DISPLAY_VER(dev_priv) < 11) 1006 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; 1007 1008 intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder), 1009 mask); 1010 1011 psr_irq_control(intel_dp); 1012 1013 if (crtc_state->dc3co_exitline) { 1014 u32 val; 1015 1016 /* 1017 * TODO: if future platforms supports DC3CO in more than one 1018 * transcoder, EXITLINE will need to be unset when disabling PSR 1019 */ 1020 val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder)); 1021 val &= ~EXITLINE_MASK; 1022 val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT; 1023 val |= EXITLINE_ENABLE; 1024 intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val); 1025 } 1026 1027 if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv)) 1028 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, 1029 intel_dp->psr.psr2_sel_fetch_enabled ? 
1030 IGNORE_PSR2_HW_TRACKING : 0); 1031 } 1032 1033 static void intel_psr_enable_locked(struct intel_dp *intel_dp, 1034 const struct intel_crtc_state *crtc_state, 1035 const struct drm_connector_state *conn_state) 1036 { 1037 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1038 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1039 struct intel_encoder *encoder = &dig_port->base; 1040 u32 val; 1041 1042 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled); 1043 1044 intel_dp->psr.psr2_enabled = crtc_state->has_psr2; 1045 intel_dp->psr.busy_frontbuffer_bits = 0; 1046 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; 1047 intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline; 1048 intel_dp->psr.transcoder = crtc_state->cpu_transcoder; 1049 /* DC5/DC6 requires at least 6 idle frames */ 1050 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6); 1051 intel_dp->psr.dc3co_exit_delay = val; 1052 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch; 1053 1054 /* 1055 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR 1056 * will still keep the error set even after the reset done in the 1057 * irq_preinstall and irq_uninstall hooks. 1058 * And enabling in this situation cause the screen to freeze in the 1059 * first time that PSR HW tries to activate so lets keep PSR disabled 1060 * to avoid any rendering problems. 1061 */ 1062 if (DISPLAY_VER(dev_priv) >= 12) { 1063 val = intel_de_read(dev_priv, 1064 TRANS_PSR_IIR(intel_dp->psr.transcoder)); 1065 val &= EDP_PSR_ERROR(0); 1066 } else { 1067 val = intel_de_read(dev_priv, EDP_PSR_IIR); 1068 val &= EDP_PSR_ERROR(intel_dp->psr.transcoder); 1069 } 1070 if (val) { 1071 intel_dp->psr.sink_not_reliable = true; 1072 drm_dbg_kms(&dev_priv->drm, 1073 "PSR interruption error set, not enabling PSR\n"); 1074 return; 1075 } 1076 1077 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", 1078 intel_dp->psr.psr2_enabled ? "2" : "1"); 1079 intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state, 1080 &intel_dp->psr.vsc); 1081 intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc); 1082 intel_psr_enable_sink(intel_dp); 1083 intel_psr_enable_source(intel_dp, crtc_state); 1084 intel_dp->psr.enabled = true; 1085 1086 intel_psr_activate(intel_dp); 1087 } 1088 1089 /** 1090 * intel_psr_enable - Enable PSR 1091 * @intel_dp: Intel DP 1092 * @crtc_state: new CRTC state 1093 * @conn_state: new CONNECTOR state 1094 * 1095 * This function can only be called after the pipe is fully trained and enabled. 
1096 */ 1097 void intel_psr_enable(struct intel_dp *intel_dp, 1098 const struct intel_crtc_state *crtc_state, 1099 const struct drm_connector_state *conn_state) 1100 { 1101 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1102 1103 if (!CAN_PSR(intel_dp)) 1104 return; 1105 1106 if (!crtc_state->has_psr) 1107 return; 1108 1109 drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp); 1110 1111 mutex_lock(&intel_dp->psr.lock); 1112 intel_psr_enable_locked(intel_dp, crtc_state, conn_state); 1113 mutex_unlock(&intel_dp->psr.lock); 1114 } 1115 1116 static void intel_psr_exit(struct intel_dp *intel_dp) 1117 { 1118 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1119 u32 val; 1120 1121 if (!intel_dp->psr.active) { 1122 if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) { 1123 val = intel_de_read(dev_priv, 1124 EDP_PSR2_CTL(intel_dp->psr.transcoder)); 1125 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE); 1126 } 1127 1128 val = intel_de_read(dev_priv, 1129 EDP_PSR_CTL(intel_dp->psr.transcoder)); 1130 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE); 1131 1132 return; 1133 } 1134 1135 if (intel_dp->psr.psr2_enabled) { 1136 tgl_disallow_dc3co_on_psr2_exit(intel_dp); 1137 val = intel_de_read(dev_priv, 1138 EDP_PSR2_CTL(intel_dp->psr.transcoder)); 1139 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE)); 1140 val &= ~EDP_PSR2_ENABLE; 1141 intel_de_write(dev_priv, 1142 EDP_PSR2_CTL(intel_dp->psr.transcoder), val); 1143 } else { 1144 val = intel_de_read(dev_priv, 1145 EDP_PSR_CTL(intel_dp->psr.transcoder)); 1146 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE)); 1147 val &= ~EDP_PSR_ENABLE; 1148 intel_de_write(dev_priv, 1149 EDP_PSR_CTL(intel_dp->psr.transcoder), val); 1150 } 1151 intel_dp->psr.active = false; 1152 } 1153 1154 static void intel_psr_disable_locked(struct intel_dp *intel_dp) 1155 { 1156 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1157 i915_reg_t psr_status; 1158 u32 psr_status_mask; 1159 1160 lockdep_assert_held(&intel_dp->psr.lock); 1161 1162 if (!intel_dp->psr.enabled) 1163 return; 1164 1165 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n", 1166 intel_dp->psr.psr2_enabled ? "2" : "1"); 1167 1168 intel_psr_exit(intel_dp); 1169 1170 if (intel_dp->psr.psr2_enabled) { 1171 psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder); 1172 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; 1173 } else { 1174 psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder); 1175 psr_status_mask = EDP_PSR_STATUS_STATE_MASK; 1176 } 1177 1178 /* Wait till PSR is idle */ 1179 if (intel_de_wait_for_clear(dev_priv, psr_status, 1180 psr_status_mask, 2000)) 1181 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n"); 1182 1183 /* WA 1408330847 */ 1184 if (intel_dp->psr.psr2_sel_fetch_enabled && 1185 (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) || 1186 IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))) 1187 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, 1188 DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0); 1189 1190 /* Disable PSR on Sink */ 1191 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); 1192 1193 if (intel_dp->psr.psr2_enabled) 1194 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0); 1195 1196 intel_dp->psr.enabled = false; 1197 } 1198 1199 /** 1200 * intel_psr_disable - Disable PSR 1201 * @intel_dp: Intel DP 1202 * @old_crtc_state: old CRTC state 1203 * 1204 * This function needs to be called before disabling pipe. 
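 *
 * Besides disabling PSR on the source and the sink, it also cancels the PSR
 * work and the DC3CO delayed work so that they cannot run after this point.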
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);
	cancel_work_sync(&intel_dp->psr.work);
	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}

static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the currently
		 * active pipe.
		 */
		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
	else
		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit
		 * PSR on older gens, so do the manual exit instead.
		 */
		intel_psr_exit(intel_dp);
}

void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
					const struct intel_crtc_state *crtc_state,
					const struct intel_plane_state *plane_state,
					int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	const struct drm_rect *clip;
	u32 val, offset;
	int ret, x, y;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	val = plane_state ? plane_state->ctl : 0;
	val &= plane->id == PLANE_CURSOR ?
val : PLANE_SEL_FETCH_CTL_ENABLE; 1265 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val); 1266 if (!val || plane->id == PLANE_CURSOR) 1267 return; 1268 1269 clip = &plane_state->psr2_sel_fetch_area; 1270 1271 val = (clip->y1 + plane_state->uapi.dst.y1) << 16; 1272 val |= plane_state->uapi.dst.x1; 1273 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val); 1274 1275 /* TODO: consider auxiliary surfaces */ 1276 x = plane_state->uapi.src.x1 >> 16; 1277 y = (plane_state->uapi.src.y1 >> 16) + clip->y1; 1278 ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset); 1279 if (ret) 1280 drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n", 1281 ret); 1282 val = y << 16 | x; 1283 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), 1284 val); 1285 1286 /* Sizes are 0 based */ 1287 val = (drm_rect_height(clip) - 1) << 16; 1288 val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; 1289 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); 1290 } 1291 1292 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) 1293 { 1294 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1295 1296 if (!HAS_PSR2_SEL_FETCH(dev_priv) || 1297 !crtc_state->enable_psr2_sel_fetch) 1298 return; 1299 1300 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder), 1301 crtc_state->psr2_man_track_ctl); 1302 } 1303 1304 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, 1305 struct drm_rect *clip, bool full_update) 1306 { 1307 u32 val = PSR2_MAN_TRK_CTL_ENABLE; 1308 1309 if (full_update) { 1310 val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; 1311 goto exit; 1312 } 1313 1314 if (clip->y1 == -1) 1315 goto exit; 1316 1317 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4); 1318 1319 val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE; 1320 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1); 1321 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1); 1322 exit: 1323 crtc_state->psr2_man_track_ctl = val; 1324 } 1325 1326 static void clip_area_update(struct drm_rect *overlap_damage_area, 1327 struct drm_rect *damage_area) 1328 { 1329 if (overlap_damage_area->y1 == -1) { 1330 overlap_damage_area->y1 = damage_area->y1; 1331 overlap_damage_area->y2 = damage_area->y2; 1332 return; 1333 } 1334 1335 if (damage_area->y1 < overlap_damage_area->y1) 1336 overlap_damage_area->y1 = damage_area->y1; 1337 1338 if (damage_area->y2 > overlap_damage_area->y2) 1339 overlap_damage_area->y2 = damage_area->y2; 1340 } 1341 1342 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, 1343 struct intel_crtc *crtc) 1344 { 1345 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 1346 struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 }; 1347 struct intel_plane_state *new_plane_state, *old_plane_state; 1348 struct intel_plane *plane; 1349 bool full_update = false; 1350 int i, ret; 1351 1352 if (!crtc_state->enable_psr2_sel_fetch) 1353 return 0; 1354 1355 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 1356 if (ret) 1357 return ret; 1358 1359 /* 1360 * Calculate minimal selective fetch area of each plane and calculate 1361 * the pipe damaged area. 1362 * In the next loop the plane selective fetch area will actually be set 1363 * using whole pipe damaged area. 
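	 *
	 * Only the y1/y2 coordinates are accumulated here: the selective
	 * update region always spans the full pipe width and is programmed
	 * in 4 line blocks, see psr2_man_trk_ctl_calc().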
1364 */ 1365 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 1366 new_plane_state, i) { 1367 struct drm_rect src, damaged_area = { .y1 = -1 }; 1368 struct drm_mode_rect *damaged_clips; 1369 u32 num_clips, j; 1370 1371 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc) 1372 continue; 1373 1374 if (!new_plane_state->uapi.visible && 1375 !old_plane_state->uapi.visible) 1376 continue; 1377 1378 /* 1379 * TODO: Not clear how to handle planes with negative position, 1380 * also planes are not updated if they have a negative X 1381 * position so for now doing a full update in this cases 1382 */ 1383 if (new_plane_state->uapi.dst.y1 < 0 || 1384 new_plane_state->uapi.dst.x1 < 0) { 1385 full_update = true; 1386 break; 1387 } 1388 1389 num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi); 1390 1391 /* 1392 * If visibility or plane moved, mark the whole plane area as 1393 * damaged as it needs to be complete redraw in the new and old 1394 * position. 1395 */ 1396 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible || 1397 !drm_rect_equals(&new_plane_state->uapi.dst, 1398 &old_plane_state->uapi.dst)) { 1399 if (old_plane_state->uapi.visible) { 1400 damaged_area.y1 = old_plane_state->uapi.dst.y1; 1401 damaged_area.y2 = old_plane_state->uapi.dst.y2; 1402 clip_area_update(&pipe_clip, &damaged_area); 1403 } 1404 1405 if (new_plane_state->uapi.visible) { 1406 damaged_area.y1 = new_plane_state->uapi.dst.y1; 1407 damaged_area.y2 = new_plane_state->uapi.dst.y2; 1408 clip_area_update(&pipe_clip, &damaged_area); 1409 } 1410 continue; 1411 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha || 1412 (!num_clips && 1413 new_plane_state->uapi.fb != old_plane_state->uapi.fb)) { 1414 /* 1415 * If the plane don't have damaged areas but the 1416 * framebuffer changed or alpha changed, mark the whole 1417 * plane area as damaged. 1418 */ 1419 damaged_area.y1 = new_plane_state->uapi.dst.y1; 1420 damaged_area.y2 = new_plane_state->uapi.dst.y2; 1421 clip_area_update(&pipe_clip, &damaged_area); 1422 continue; 1423 } 1424 1425 drm_rect_fp_to_int(&src, &new_plane_state->uapi.src); 1426 damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi); 1427 1428 for (j = 0; j < num_clips; j++) { 1429 struct drm_rect clip; 1430 1431 clip.x1 = damaged_clips[j].x1; 1432 clip.y1 = damaged_clips[j].y1; 1433 clip.x2 = damaged_clips[j].x2; 1434 clip.y2 = damaged_clips[j].y2; 1435 if (drm_rect_intersect(&clip, &src)) 1436 clip_area_update(&damaged_area, &clip); 1437 } 1438 1439 if (damaged_area.y1 == -1) 1440 continue; 1441 1442 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1; 1443 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1; 1444 clip_area_update(&pipe_clip, &damaged_area); 1445 } 1446 1447 if (full_update) 1448 goto skip_sel_fetch_set_loop; 1449 1450 /* It must be aligned to 4 lines */ 1451 pipe_clip.y1 -= pipe_clip.y1 % 4; 1452 if (pipe_clip.y2 % 4) 1453 pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4; 1454 1455 /* 1456 * Now that we have the pipe damaged area check if it intersect with 1457 * every plane, if it does set the plane selective fetch area. 
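	 *
	 * The resulting selective fetch area is stored relative to the plane,
	 * i.e. with the plane's dst y offset subtracted; it is translated
	 * back when programming the registers in
	 * intel_psr2_program_plane_sel_fetch().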
1458 */ 1459 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 1460 new_plane_state, i) { 1461 struct drm_rect *sel_fetch_area, inter; 1462 1463 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc || 1464 !new_plane_state->uapi.visible) 1465 continue; 1466 1467 inter = pipe_clip; 1468 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) 1469 continue; 1470 1471 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area; 1472 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1; 1473 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1; 1474 } 1475 1476 skip_sel_fetch_set_loop: 1477 psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update); 1478 return 0; 1479 } 1480 1481 /** 1482 * intel_psr_update - Update PSR state 1483 * @intel_dp: Intel DP 1484 * @crtc_state: new CRTC state 1485 * @conn_state: new CONNECTOR state 1486 * 1487 * This functions will update PSR states, disabling, enabling or switching PSR 1488 * version when executing fastsets. For full modeset, intel_psr_disable() and 1489 * intel_psr_enable() should be called instead. 1490 */ 1491 void intel_psr_update(struct intel_dp *intel_dp, 1492 const struct intel_crtc_state *crtc_state, 1493 const struct drm_connector_state *conn_state) 1494 { 1495 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1496 struct intel_psr *psr = &intel_dp->psr; 1497 bool enable, psr2_enable; 1498 1499 if (!CAN_PSR(intel_dp)) 1500 return; 1501 1502 mutex_lock(&intel_dp->psr.lock); 1503 1504 enable = crtc_state->has_psr; 1505 psr2_enable = crtc_state->has_psr2; 1506 1507 if (enable == psr->enabled && psr2_enable == psr->psr2_enabled && 1508 crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) { 1509 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */ 1510 if (crtc_state->crc_enabled && psr->enabled) 1511 psr_force_hw_tracking_exit(intel_dp); 1512 else if (DISPLAY_VER(dev_priv) < 9 && psr->enabled) { 1513 /* 1514 * Activate PSR again after a force exit when enabling 1515 * CRC in older gens 1516 */ 1517 if (!intel_dp->psr.active && 1518 !intel_dp->psr.busy_frontbuffer_bits) 1519 schedule_work(&intel_dp->psr.work); 1520 } 1521 1522 goto unlock; 1523 } 1524 1525 if (psr->enabled) 1526 intel_psr_disable_locked(intel_dp); 1527 1528 if (enable) 1529 intel_psr_enable_locked(intel_dp, crtc_state, conn_state); 1530 1531 unlock: 1532 mutex_unlock(&intel_dp->psr.lock); 1533 } 1534 1535 /** 1536 * psr_wait_for_idle - wait for PSR1 to idle 1537 * @intel_dp: Intel DP 1538 * @out_value: PSR status in case of failure 1539 * 1540 * Returns: 0 on success or -ETIMEOUT if PSR status does not idle. 1541 * 1542 */ 1543 static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value) 1544 { 1545 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1546 1547 /* 1548 * From bspec: Panel Self Refresh (BDW+) 1549 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of 1550 * exit training time + 1.5 ms of aux channel handshake. 50 ms is 1551 * defensive enough to cover everything. 1552 */ 1553 return __intel_wait_for_register(&dev_priv->uncore, 1554 EDP_PSR_STATUS(intel_dp->psr.transcoder), 1555 EDP_PSR_STATUS_STATE_MASK, 1556 EDP_PSR_STATUS_STATE_IDLE, 2, 50, 1557 out_value); 1558 } 1559 1560 /** 1561 * intel_psr_wait_for_idle - wait for PSR1 to idle 1562 * @new_crtc_state: new CRTC state 1563 * 1564 * This function is expected to be called from pipe_update_start() where it is 1565 * not expected to race with PSR enable or disable. 
1566 */ 1567 void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state) 1568 { 1569 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev); 1570 struct intel_encoder *encoder; 1571 1572 if (!new_crtc_state->has_psr) 1573 return; 1574 1575 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder, 1576 new_crtc_state->uapi.encoder_mask) { 1577 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1578 u32 psr_status; 1579 1580 mutex_lock(&intel_dp->psr.lock); 1581 if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) { 1582 mutex_unlock(&intel_dp->psr.lock); 1583 continue; 1584 } 1585 1586 /* when the PSR1 is enabled */ 1587 if (psr_wait_for_idle(intel_dp, &psr_status)) 1588 drm_err(&dev_priv->drm, 1589 "PSR idle timed out 0x%x, atomic update may fail\n", 1590 psr_status); 1591 mutex_unlock(&intel_dp->psr.lock); 1592 } 1593 } 1594 1595 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) 1596 { 1597 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1598 i915_reg_t reg; 1599 u32 mask; 1600 int err; 1601 1602 if (!intel_dp->psr.enabled) 1603 return false; 1604 1605 if (intel_dp->psr.psr2_enabled) { 1606 reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder); 1607 mask = EDP_PSR2_STATUS_STATE_MASK; 1608 } else { 1609 reg = EDP_PSR_STATUS(intel_dp->psr.transcoder); 1610 mask = EDP_PSR_STATUS_STATE_MASK; 1611 } 1612 1613 mutex_unlock(&intel_dp->psr.lock); 1614 1615 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50); 1616 if (err) 1617 drm_err(&dev_priv->drm, 1618 "Timed out waiting for PSR Idle for re-enable\n"); 1619 1620 /* After the unlocked wait, verify that PSR is still wanted! */ 1621 mutex_lock(&intel_dp->psr.lock); 1622 return err == 0 && intel_dp->psr.enabled; 1623 } 1624 1625 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) 1626 { 1627 struct drm_connector_list_iter conn_iter; 1628 struct drm_device *dev = &dev_priv->drm; 1629 struct drm_modeset_acquire_ctx ctx; 1630 struct drm_atomic_state *state; 1631 struct drm_connector *conn; 1632 int err = 0; 1633 1634 state = drm_atomic_state_alloc(dev); 1635 if (!state) 1636 return -ENOMEM; 1637 1638 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 1639 state->acquire_ctx = &ctx; 1640 1641 retry: 1642 1643 drm_connector_list_iter_begin(dev, &conn_iter); 1644 drm_for_each_connector_iter(conn, &conn_iter) { 1645 struct drm_connector_state *conn_state; 1646 struct drm_crtc_state *crtc_state; 1647 1648 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP) 1649 continue; 1650 1651 conn_state = drm_atomic_get_connector_state(state, conn); 1652 if (IS_ERR(conn_state)) { 1653 err = PTR_ERR(conn_state); 1654 break; 1655 } 1656 1657 if (!conn_state->crtc) 1658 continue; 1659 1660 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc); 1661 if (IS_ERR(crtc_state)) { 1662 err = PTR_ERR(crtc_state); 1663 break; 1664 } 1665 1666 /* Mark mode as changed to trigger a pipe->update() */ 1667 crtc_state->mode_changed = true; 1668 } 1669 drm_connector_list_iter_end(&conn_iter); 1670 1671 if (err == 0) 1672 err = drm_atomic_commit(state); 1673 1674 if (err == -EDEADLK) { 1675 drm_atomic_state_clear(state); 1676 err = drm_modeset_backoff(&ctx); 1677 if (!err) 1678 goto retry; 1679 } 1680 1681 drm_modeset_drop_locks(&ctx); 1682 drm_modeset_acquire_fini(&ctx); 1683 drm_atomic_state_put(state); 1684 1685 return err; 1686 } 1687 1688 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val) 1689 { 1690 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
	if (ret)
		return ret;

	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	intel_dp->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (intel_dp->psr.enabled)
		psr_irq_control(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	intel_psr_disable_locked(intel_dp);
	psr->sink_not_reliable = true;
	/* let's make sure that the sink is awake */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.work);

	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		goto unlock;

	if (READ_ONCE(intel_dp->psr.irq_aux_error))
		intel_psr_handle_irq(intel_dp);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(intel_dp))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
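 *
 * Note that ORIGIN_FLIP is ignored here: page flips are handled through the
 * flush path instead, see intel_psr_flush() and tgl_dc3co_flush().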
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	if (origin == ORIGIN_FLIP)
		return;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;

		if (pipe_frontbuffer_bits)
			intel_psr_exit(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}

/*
 * Once we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will also invalidate and flush the PSR for ORIGIN_FLIP
 * events, so tgl_dc3co_flush() will have to be changed accordingly.
 */
static void
tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
		enum fb_op_origin origin)
{
	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.dc3co_enabled)
		goto unlock;

	if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
		goto unlock;

	/*
	 * Every flip-originated frontbuffer flush re-arms the delayed work;
	 * when the delayed work finally runs it means the display has been
	 * idle.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
		goto unlock;

	tgl_psr2_enable_dc3co(intel_dp);
	mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
			 intel_dp->psr.dc3co_exit_delay);

unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
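 *
 * Note that flushes with %ORIGIN_FLIP are not treated as ordinary flushes
 * here: they do not touch busy_frontbuffer_bits and are only used to drive
 * the DC3CO enter/exit logic (see tgl_dc3co_flush()).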
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (origin == ORIGIN_FLIP) {
			tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
			continue;
		}

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;

		/* By definition flush = invalidate + flush */
		if (pipe_frontbuffer_bits)
			psr_force_hw_tracking_exit(intel_dp);

		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
			schedule_work(&intel_dp->psr.work);
		mutex_unlock(&intel_dp->psr.lock);
	}
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @intel_dp: Intel DP
 *
 * This function is called after initializing the connector (connector
 * initialization takes care of handling the connector capabilities) and it
 * initializes the basic PSR state for each DP encoder.
 */
void intel_psr_init(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!HAS_PSR(dev_priv))
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder,
	 * but BDW, GEN9 and GEN11 are not validated by the HW team on any
	 * transcoder other than the eDP one, so for those platforms only one
	 * instance of PSR is supported and it stays hardcoded to PORT_A.
	 * GEN12, however, supports an instance of PSR registers per
	 * transcoder.
	 */
	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	intel_dp->psr.source_support = true;

	if (IS_HASWELL(dev_priv))
		/*
		 * HSW doesn't have its PSR registers in the same space as the
		 * transcoder, so set this to a value that, when subtracted
		 * from a register offset in transcoder space, results in the
		 * right offset for HSW.
		 */
		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;

	if (dev_priv->params.enable_psr == -1)
		if (DISPLAY_VER(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			dev_priv->params.enable_psr = 0;

	/* Set link_standby vs. link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		intel_dp->psr.link_standby = false;
	else if (DISPLAY_VER(dev_priv) < 12)
		/* For platforms up to TGL, respect the VBT setting again */
		intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
	mutex_init(&intel_dp->psr.lock);
}

static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
	if (ret != 1)
		return ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
	if (ret != 1)
		return ret;

	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}

static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_dp_aux *aux = &intel_dp->aux;
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	if (!psr->psr2_enabled)
		return;

	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
		return;
	}

	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "ALPM lock timeout error, disabling PSR\n");

		/* Clear the error */
		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
	}
}

static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "Sink PSR capability changed, disabling PSR\n");

		/* Clear it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}

void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(&dev_priv->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(&dev_priv->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	psr_alpm_check(intel_dp);
	psr_capability_changed_check(intel_dp);

exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	bool ret;

	if (!CAN_PSR(intel_dp))
		return false;

	mutex_lock(&intel_dp->psr.lock);
	ret = intel_dp->psr.enabled;
	mutex_unlock(&intel_dp->psr.lock);

	return ret;
}