1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 
27 #include "i915_drv.h"
28 #include "i915_reg.h"
29 #include "intel_atomic.h"
30 #include "intel_crtc.h"
31 #include "intel_de.h"
32 #include "intel_display_types.h"
33 #include "intel_dp.h"
34 #include "intel_dp_aux.h"
35 #include "intel_hdmi.h"
36 #include "intel_psr.h"
37 #include "intel_psr_regs.h"
38 #include "intel_snps_phy.h"
39 #include "skl_universal_plane.h"
40 
41 /**
42  * DOC: Panel Self Refresh (PSR/SRD)
43  *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as
 * the frame buffer for that display is unchanged.
50  *
51  * Panel Self Refresh must be supported by both Hardware (source) and
52  * Panel (sink).
53  *
54  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
55  * to power down the link and memory controller. For DSI panels the same idea
56  * is called "manual mode".
57  *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
69  *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power saving state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 exits deep sleep state (if it was in it),
 * DC3CO is enabled, and tgl_dc3co_disable_work is scheduled to run after 6
 * frames. If no other flip occurs and that work is executed, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep again, resetting in
 * case of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as
 * that would bring a lot of complexity and most modern systems will only
 * use page flips.
86  */
87 
88 /*
89  * Description of PSR mask bits:
90  *
91  * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
92  *
93  *  When unmasked (nearly) all display register writes (eg. even
94  *  SWF) trigger a PSR exit. Some registers are excluded from this
95  *  and they have a more specific mask (described below). On icl+
96  *  this bit no longer exists and is effectively always set.
97  *
98  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
99  *
100  *  When unmasked (nearly) all pipe/plane register writes
101  *  trigger a PSR exit. Some plane registers are excluded from this
102  *  and they have a more specific mask (described below).
103  *
104  * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
105  * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
106  * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
107  *
108  *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
109  *  SPR_SURF/CURBASE are not included in this and instead are
110  *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
111  *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
112  *
113  * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
114  * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
115  *
 *  When unmasked PSR is blocked as long as the sprite
 *  plane is enabled. skl+ with their universal planes no
 *  longer have a mask bit like this, and no plane being
 *  enabled blocks PSR.
120  *
121  * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
122  * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
123  *
 *  When unmasked CURPOS writes trigger a PSR exit. On skl+
 *  this bit no longer exists, but CURPOS is included in the
 *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
127  *
128  * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
129  * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
130  *
131  *  When unmasked PSR is blocked as long as vblank and/or vsync
132  *  interrupt is unmasked in IMR *and* enabled in IER.
133  *
134  * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
135  * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
136  *
 *  Selects whether PSR exit generates an extra vblank before
 *  the first frame is transmitted. Also note the opposite polarity
 *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
 *  unmasked==do not generate the extra vblank).
141  *
142  *  With DC states enabled the extra vblank happens after link training,
 *  with DC states disabled it happens immediately upon PSR exit trigger.
144  *  No idea as of now why there is a difference. HSW/BDW (which don't
145  *  even have DMC) always generate it after link training. Go figure.
146  *
147  *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
148  *  and thus won't latch until the first vblank. So with DC states
 *  enabled the register effectively uses the reset value during DC5
150  *  exit+PSR exit sequence, and thus the bit does nothing until
151  *  latched by the vblank that it was trying to prevent from being
152  *  generated in the first place. So we should probably call this
153  *  one a chicken/egg bit instead on skl+.
154  *
155  *  In standby mode (as opposed to link-off) this makes no difference
156  *  as the timing generator keeps running the whole time generating
157  *  normal periodic vblanks.
158  *
159  *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
160  *  and doing so makes the behaviour match the skl+ reset value.
161  *
162  * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
163  * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
164  *
 *  On BDW without this bit set no vblanks whatsoever are
 *  generated after PSR exit. On HSW this has no apparent effect.
167  *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
168  *
169  * The rest of the bits are more self-explanatory and/or
170  * irrelevant for normal operation.
171  */
172 
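/*
 * PSR on/off policy: a debugfs override takes precedence, then the
 * enable_psr module parameter, and finally the VBT default.
 */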
173 static bool psr_global_enabled(struct intel_dp *intel_dp)
174 {
175 	struct intel_connector *connector = intel_dp->attached_connector;
176 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
177 
178 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
179 	case I915_PSR_DEBUG_DEFAULT:
180 		if (i915->params.enable_psr == -1)
181 			return connector->panel.vbt.psr.enable;
182 		return i915->params.enable_psr;
183 	case I915_PSR_DEBUG_DISABLE:
184 		return false;
185 	default:
186 		return true;
187 	}
188 }
189 
190 static bool psr2_global_enabled(struct intel_dp *intel_dp)
191 {
192 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
193 
194 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
195 	case I915_PSR_DEBUG_DISABLE:
196 	case I915_PSR_DEBUG_FORCE_PSR1:
197 		return false;
198 	default:
199 		if (i915->params.enable_psr == 1)
200 			return false;
201 		return true;
202 	}
203 }
204 
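/*
 * On display 12+ the PSR interrupt bits moved into per-transcoder
 * TRANS_PSR_* registers; these helpers return the bit/mask for the
 * register layout in use.
 */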
205 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
206 {
207 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
208 
209 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
210 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
211 }
212 
213 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
214 {
215 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
216 
217 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
218 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
219 }
220 
221 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
222 {
223 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
224 
225 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
226 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
227 }
228 
229 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
230 {
231 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
232 
233 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
234 		EDP_PSR_MASK(intel_dp->psr.transcoder);
235 }
236 
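/*
 * Configure the PSR interrupt mask: the error interrupt is always of
 * interest, pre-entry/post-exit events only when the IRQ debug flag is set.
 */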
237 static void psr_irq_control(struct intel_dp *intel_dp)
238 {
239 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
240 	i915_reg_t imr_reg;
241 	u32 mask;
242 
243 	if (DISPLAY_VER(dev_priv) >= 12)
244 		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
245 	else
246 		imr_reg = EDP_PSR_IMR;
247 
248 	mask = psr_irq_psr_error_bit_get(intel_dp);
249 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
250 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
251 			psr_irq_pre_entry_bit_get(intel_dp);
252 
253 	intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask);
254 }
255 
256 static void psr_event_print(struct drm_i915_private *i915,
257 			    u32 val, bool psr2_enabled)
258 {
259 	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
260 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
261 		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
262 	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
263 		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
264 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
265 		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
266 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
267 		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
268 	if (val & PSR_EVENT_GRAPHICS_RESET)
269 		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
270 	if (val & PSR_EVENT_PCH_INTERRUPT)
271 		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
272 	if (val & PSR_EVENT_MEMORY_UP)
273 		drm_dbg_kms(&i915->drm, "\tMemory up\n");
274 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
275 		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
276 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
277 		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
278 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
279 		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
280 	if (val & PSR_EVENT_REGISTER_UPDATE)
281 		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
282 	if (val & PSR_EVENT_HDCP_ENABLE)
283 		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
284 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
285 		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
286 	if (val & PSR_EVENT_VBI_ENABLE)
287 		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
288 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
289 		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
290 	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
291 		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
292 }
293 
294 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
295 {
296 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
297 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
298 	ktime_t time_ns =  ktime_get();
299 	i915_reg_t imr_reg;
300 
301 	if (DISPLAY_VER(dev_priv) >= 12)
302 		imr_reg = TRANS_PSR_IMR(cpu_transcoder);
303 	else
304 		imr_reg = EDP_PSR_IMR;
305 
306 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
307 		intel_dp->psr.last_entry_attempt = time_ns;
308 		drm_dbg_kms(&dev_priv->drm,
309 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
310 			    transcoder_name(cpu_transcoder));
311 	}
312 
313 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
314 		intel_dp->psr.last_exit = time_ns;
315 		drm_dbg_kms(&dev_priv->drm,
316 			    "[transcoder %s] PSR exit completed\n",
317 			    transcoder_name(cpu_transcoder));
318 
319 		if (DISPLAY_VER(dev_priv) >= 9) {
320 			u32 val;
321 
322 			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
323 
324 			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
325 		}
326 	}
327 
328 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
329 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
330 			 transcoder_name(cpu_transcoder));
331 
332 		intel_dp->psr.irq_aux_error = true;
333 
		/*
		 * If this interrupt is not masked it will keep firing so
		 * fast that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again,
		 * so we don't care about unmasking the interrupt or
		 * clearing irq_aux_error.
		 */
342 		intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
343 
344 		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
345 	}
346 }
347 
348 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
349 {
350 	u8 alpm_caps = 0;
351 
352 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
353 			      &alpm_caps) != 1)
354 		return false;
355 	return alpm_caps & DP_ALPM_CAP;
356 }
357 
358 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
359 {
360 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
361 	u8 val = 8; /* assume the worst if we can't read the value */
362 
363 	if (drm_dp_dpcd_readb(&intel_dp->aux,
364 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
365 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
366 	else
367 		drm_dbg_kms(&i915->drm,
368 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
369 	return val;
370 }
371 
372 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
373 {
374 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
375 	ssize_t r;
376 	u16 w;
377 	u8 y;
378 
	/* If the sink doesn't have specific granularity requirements, set legacy ones */
380 	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
381 		/* As PSR2 HW sends full lines, we do not care about x granularity */
382 		w = 4;
383 		y = 4;
384 		goto exit;
385 	}
386 
387 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
388 	if (r != 2)
389 		drm_dbg_kms(&i915->drm,
390 			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
391 	/*
392 	 * Spec says that if the value read is 0 the default granularity should
393 	 * be used instead.
394 	 */
395 	if (r != 2 || w == 0)
396 		w = 4;
397 
398 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
399 	if (r != 1) {
400 		drm_dbg_kms(&i915->drm,
401 			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
402 		y = 4;
403 	}
404 	if (y == 0)
405 		y = 1;
406 
407 exit:
408 	intel_dp->psr.su_w_granularity = w;
409 	intel_dp->psr.su_y_granularity = y;
410 }
411 
412 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
413 {
414 	struct drm_i915_private *dev_priv =
415 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
416 
417 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
418 			 sizeof(intel_dp->psr_dpcd));
419 
420 	if (!intel_dp->psr_dpcd[0])
421 		return;
422 	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
423 		    intel_dp->psr_dpcd[0]);
424 
425 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
426 		drm_dbg_kms(&dev_priv->drm,
427 			    "PSR support not currently available for this panel\n");
428 		return;
429 	}
430 
431 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
432 		drm_dbg_kms(&dev_priv->drm,
433 			    "Panel lacks power state control, PSR cannot be enabled\n");
434 		return;
435 	}
436 
437 	intel_dp->psr.sink_support = true;
438 	intel_dp->psr.sink_sync_latency =
439 		intel_dp_get_sink_sync_latency(intel_dp);
440 
441 	if (DISPLAY_VER(dev_priv) >= 9 &&
442 	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
443 		bool y_req = intel_dp->psr_dpcd[1] &
444 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
445 		bool alpm = intel_dp_get_alpm_status(intel_dp);
446 
		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
		 * are only sure that they are going to be used when required
		 * by the panel. This way the panel is capable of doing
		 * selective updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
458 		intel_dp->psr.sink_psr2_support = y_req && alpm;
459 		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
460 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
461 
462 		if (intel_dp->psr.sink_psr2_support) {
463 			intel_dp->psr.colorimetry_support =
464 				intel_dp_get_colorimetry_status(intel_dp);
465 			intel_dp_get_su_granularity(intel_dp);
466 		}
467 	}
468 }
469 
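/* Program the sink-side DPCD configuration and enable PSR on the panel. */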
470 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
471 {
472 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
473 	u8 dpcd_val = DP_PSR_ENABLE;
474 
475 	/* Enable ALPM at sink for psr2 */
476 	if (intel_dp->psr.psr2_enabled) {
477 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
478 				   DP_ALPM_ENABLE |
479 				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
480 
481 		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
482 	} else {
483 		if (intel_dp->psr.link_standby)
484 			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
485 
486 		if (DISPLAY_VER(dev_priv) >= 8)
487 			dpcd_val |= DP_PSR_CRC_VERIFICATION;
488 	}
489 
490 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
491 		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
492 
493 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
494 
495 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
496 }
497 
498 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
499 {
500 	struct intel_connector *connector = intel_dp->attached_connector;
501 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
502 	u32 val = 0;
503 
504 	if (DISPLAY_VER(dev_priv) >= 11)
505 		val |= EDP_PSR_TP4_TIME_0us;
506 
507 	if (dev_priv->params.psr_safest_params) {
508 		val |= EDP_PSR_TP1_TIME_2500us;
509 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
510 		goto check_tp3_sel;
511 	}
512 
513 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
514 		val |= EDP_PSR_TP1_TIME_0us;
515 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
516 		val |= EDP_PSR_TP1_TIME_100us;
517 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
518 		val |= EDP_PSR_TP1_TIME_500us;
519 	else
520 		val |= EDP_PSR_TP1_TIME_2500us;
521 
522 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
523 		val |= EDP_PSR_TP2_TP3_TIME_0us;
524 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
525 		val |= EDP_PSR_TP2_TP3_TIME_100us;
526 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
527 		val |= EDP_PSR_TP2_TP3_TIME_500us;
528 	else
529 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
530 
531 check_tp3_sel:
532 	if (intel_dp_source_supports_tps3(dev_priv) &&
533 	    drm_dp_tps3_supported(intel_dp->dpcd))
534 		val |= EDP_PSR_TP_TP1_TP3;
535 	else
536 		val |= EDP_PSR_TP_TP1_TP2;
537 
538 	return val;
539 }
540 
541 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
542 {
543 	struct intel_connector *connector = intel_dp->attached_connector;
544 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
545 	int idle_frames;
546 
547 	/* Let's use 6 as the minimum to cover all known cases including the
548 	 * off-by-one issue that HW has in some cases.
549 	 */
550 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
551 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
552 
553 	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
554 		idle_frames = 0xf;
555 
556 	return idle_frames;
557 }
558 
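/* Program EDP_PSR_CTL and enable PSR1 on the source side. */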
559 static void hsw_activate_psr1(struct intel_dp *intel_dp)
560 {
561 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
562 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
563 	u32 max_sleep_time = 0x1f;
564 	u32 val = EDP_PSR_ENABLE;
565 
566 	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
567 
568 	val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
569 	if (IS_HASWELL(dev_priv))
570 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
571 
572 	if (intel_dp->psr.link_standby)
573 		val |= EDP_PSR_LINK_STANDBY;
574 
575 	val |= intel_psr1_get_tp_time(intel_dp);
576 
577 	if (DISPLAY_VER(dev_priv) >= 8)
578 		val |= EDP_PSR_CRC_ENABLE;
579 
580 	intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder),
581 		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
582 }
583 
584 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
585 {
586 	struct intel_connector *connector = intel_dp->attached_connector;
587 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
588 	u32 val = 0;
589 
590 	if (dev_priv->params.psr_safest_params)
591 		return EDP_PSR2_TP2_TIME_2500us;
592 
593 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
594 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
595 		val |= EDP_PSR2_TP2_TIME_50us;
596 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
597 		val |= EDP_PSR2_TP2_TIME_100us;
598 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
599 		val |= EDP_PSR2_TP2_TIME_500us;
600 	else
601 		val |= EDP_PSR2_TP2_TIME_2500us;
602 
603 	return val;
604 }
605 
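/*
 * Block count is expressed in units of 4 lines: 8 lines when both the IO
 * and fast wake line counts are below 9, otherwise 12 lines.
 */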
606 static int psr2_block_count_lines(struct intel_dp *intel_dp)
607 {
608 	return intel_dp->psr.io_wake_lines < 9 &&
609 		intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
610 }
611 
612 static int psr2_block_count(struct intel_dp *intel_dp)
613 {
614 	return psr2_block_count_lines(intel_dp) / 4;
615 }
616 
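/* Program EDP_PSR2_CTL and enable PSR2 (selective update) on the source side. */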
617 static void hsw_activate_psr2(struct intel_dp *intel_dp)
618 {
619 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
620 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
621 	u32 val = EDP_PSR2_ENABLE;
622 
623 	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
624 
625 	if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
626 		val |= EDP_SU_TRACK_ENABLE;
627 
628 	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
629 		val |= EDP_Y_COORDINATE_ENABLE;
630 
631 	val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
632 	val |= intel_psr2_get_tp_time(intel_dp);
633 
634 	if (DISPLAY_VER(dev_priv) >= 12) {
635 		if (psr2_block_count(intel_dp) > 2)
636 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
637 		else
638 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
639 	}
640 
641 	/* Wa_22012278275:adl-p */
642 	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
643 		static const u8 map[] = {
644 			2, /* 5 lines */
645 			1, /* 6 lines */
646 			0, /* 7 lines */
647 			3, /* 8 lines */
648 			6, /* 9 lines */
649 			5, /* 10 lines */
650 			4, /* 11 lines */
651 			7, /* 12 lines */
652 		};
653 		/*
654 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments below for more information
656 		 */
657 		int tmp;
658 
659 		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
660 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
661 
662 		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
663 		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
664 	} else if (DISPLAY_VER(dev_priv) >= 12) {
665 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
666 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
667 	} else if (DISPLAY_VER(dev_priv) >= 9) {
668 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
669 		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
670 	}
671 
672 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
673 		val |= EDP_PSR2_SU_SDP_SCANLINE;
674 
675 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
676 		u32 tmp;
677 
678 		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
679 		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
680 	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
681 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
682 	}
683 
684 	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
687 	 */
688 	intel_de_write(dev_priv, EDP_PSR_CTL(cpu_transcoder), 0);
689 
690 	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
691 }
692 
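/*
 * PSR2 is only supported on a subset of transcoders: A and B on ADL-P and
 * display 14+, only A on other display 12+ platforms, and the eDP
 * transcoder before that.
 */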
693 static bool
694 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
695 {
696 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
697 		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
698 	else if (DISPLAY_VER(dev_priv) >= 12)
699 		return cpu_transcoder == TRANSCODER_A;
700 	else
701 		return cpu_transcoder == TRANSCODER_EDP;
702 }
703 
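/* Frame time in microseconds for the current mode, or 0 if the CRTC is inactive. */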
704 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
705 {
706 	if (!cstate || !cstate->hw.active)
707 		return 0;
708 
709 	return DIV_ROUND_UP(1000 * 1000,
710 			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
711 }
712 
713 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
714 				     u32 idle_frames)
715 {
716 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
717 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
718 
719 	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
720 		     EDP_PSR2_IDLE_FRAMES_MASK,
721 		     EDP_PSR2_IDLE_FRAMES(idle_frames));
722 }
723 
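/*
 * Allow DC3CO while PSR2 is active: program 0 idle frames and select
 * DC3CO as the target DC state.
 */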
724 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
725 {
726 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
727 
728 	psr2_program_idle_frames(intel_dp, 0);
729 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
730 }
731 
732 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
733 {
734 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
735 
736 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
737 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
738 }
739 
740 static void tgl_dc3co_disable_work(struct work_struct *work)
741 {
742 	struct intel_dp *intel_dp =
743 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
744 
745 	mutex_lock(&intel_dp->psr.lock);
746 	/* If delayed work is pending, it is not idle */
747 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
748 		goto unlock;
749 
750 	tgl_psr2_disable_dc3co(intel_dp);
751 unlock:
752 	mutex_unlock(&intel_dp->psr.lock);
753 }
754 
755 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
756 {
757 	if (!intel_dp->psr.dc3co_exitline)
758 		return;
759 
760 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
762 	tgl_psr2_disable_dc3co(intel_dp);
763 }
764 
765 static bool
766 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
767 			      struct intel_crtc_state *crtc_state)
768 {
769 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
770 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
771 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
772 	enum port port = dig_port->base.port;
773 
774 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
775 		return pipe <= PIPE_B && port <= PORT_B;
776 	else
777 		return pipe == PIPE_A && port == PORT_A;
778 }
779 
780 static void
781 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
782 				  struct intel_crtc_state *crtc_state)
783 {
784 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
785 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
786 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
787 	u32 exit_scanlines;
788 
789 	/*
790 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
	 * keep DC3CO disabled until the new activation/deactivation sequence
	 * is implemented. B.Specs:49196
793 	 */
794 	return;
795 
796 	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
798 	 * TODO: when the issue is addressed, this restriction should be removed.
799 	 */
800 	if (crtc_state->enable_psr2_sel_fetch)
801 		return;
802 
803 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
804 		return;
805 
806 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
807 		return;
808 
809 	/* Wa_16011303918:adl-p */
810 	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
811 		return;
812 
813 	/*
814 	 * DC3CO Exit time 200us B.Spec 49196
815 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
816 	 */
817 	exit_scanlines =
818 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
819 
820 	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
821 		return;
822 
823 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
824 }
825 
826 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
827 					      struct intel_crtc_state *crtc_state)
828 {
829 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
830 
831 	if (!dev_priv->params.enable_psr2_sel_fetch &&
832 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
833 		drm_dbg_kms(&dev_priv->drm,
834 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
835 		return false;
836 	}
837 
838 	if (crtc_state->uapi.async_flip) {
839 		drm_dbg_kms(&dev_priv->drm,
840 			    "PSR2 sel fetch not enabled, async flip enabled\n");
841 		return false;
842 	}
843 
844 	return crtc_state->enable_psr2_sel_fetch = true;
845 }
846 
847 static bool psr2_granularity_check(struct intel_dp *intel_dp,
848 				   struct intel_crtc_state *crtc_state)
849 {
850 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
851 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
852 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
853 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
854 	u16 y_granularity = 0;
855 
	/* PSR2 HW only sends full lines, so we only need to validate the width */
857 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
858 		return false;
859 
860 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
861 		return false;
862 
863 	/* HW tracking is only aligned to 4 lines */
864 	if (!crtc_state->enable_psr2_sel_fetch)
865 		return intel_dp->psr.su_y_granularity == 4;
866 
867 	/*
	 * adl_p and mtl platforms have 1-line granularity.
	 * For other platforms with SW tracking we can adjust the y coordinates
	 * to match the sink requirement if it is a multiple of 4.
871 	 */
872 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
873 		y_granularity = intel_dp->psr.su_y_granularity;
874 	else if (intel_dp->psr.su_y_granularity <= 2)
875 		y_granularity = 4;
876 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
877 		y_granularity = intel_dp->psr.su_y_granularity;
878 
879 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
880 		return false;
881 
882 	if (crtc_state->dsc.compression_enable &&
883 	    vdsc_cfg->slice_height % y_granularity)
884 		return false;
885 
886 	crtc_state->su_y_granularity = y_granularity;
887 	return true;
888 }
889 
890 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
891 							struct intel_crtc_state *crtc_state)
892 {
893 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
894 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
895 	u32 hblank_total, hblank_ns, req_ns;
896 
897 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
898 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
899 
900 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
901 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
902 
903 	if ((hblank_ns - req_ns) > 100)
904 		return true;
905 
906 	/* Not supported <13 / Wa_22012279113:adl-p */
907 	if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
908 		return false;
909 
910 	crtc_state->req_psr2_sdp_prior_scanline = true;
911 	return true;
912 }
913 
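/*
 * Convert the platform IO/fast wake times into scanlines for the current
 * mode; PSR2 is rejected if they exceed the hardware limit.
 */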
914 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
915 				     struct intel_crtc_state *crtc_state)
916 {
917 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
918 	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
919 	u8 max_wake_lines;
920 
921 	if (DISPLAY_VER(i915) >= 12) {
922 		io_wake_time = 42;
923 		/*
924 		 * According to Bspec it's 42us, but based on testing
925 		 * it is not enough -> use 45 us.
926 		 */
927 		fast_wake_time = 45;
928 		max_wake_lines = 12;
929 	} else {
930 		io_wake_time = 50;
931 		fast_wake_time = 32;
932 		max_wake_lines = 8;
933 	}
934 
935 	io_wake_lines = intel_usecs_to_scanlines(
936 		&crtc_state->hw.adjusted_mode, io_wake_time);
937 	fast_wake_lines = intel_usecs_to_scanlines(
938 		&crtc_state->hw.adjusted_mode, fast_wake_time);
939 
940 	if (io_wake_lines > max_wake_lines ||
941 	    fast_wake_lines > max_wake_lines)
942 		return false;
943 
944 	if (i915->params.psr_safest_params)
945 		io_wake_lines = fast_wake_lines = max_wake_lines;
946 
	/* According to Bspec the lower limit should be set to 7 lines. */
948 	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
949 	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
950 
951 	return true;
952 }
953 
954 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
955 				    struct intel_crtc_state *crtc_state)
956 {
957 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
958 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
959 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
960 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
961 
962 	if (!intel_dp->psr.sink_psr2_support)
963 		return false;
964 
	/* JSL and EHL only support eDP 1.3 */
966 	if (IS_JSL_EHL(dev_priv)) {
967 		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
968 		return false;
969 	}
970 
971 	/* Wa_16011181250 */
972 	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
973 	    IS_DG2(dev_priv)) {
974 		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
975 		return false;
976 	}
977 
978 	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
979 		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
980 		return false;
981 	}
982 
983 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
984 		drm_dbg_kms(&dev_priv->drm,
985 			    "PSR2 not supported in transcoder %s\n",
986 			    transcoder_name(crtc_state->cpu_transcoder));
987 		return false;
988 	}
989 
990 	if (!psr2_global_enabled(intel_dp)) {
991 		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
992 		return false;
993 	}
994 
995 	/*
996 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
997 	 * resolution requires DSC to be enabled, priority is given to DSC
998 	 * over PSR2.
999 	 */
1000 	if (crtc_state->dsc.compression_enable &&
1001 	    (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
1002 		drm_dbg_kms(&dev_priv->drm,
1003 			    "PSR2 cannot be enabled since DSC is enabled\n");
1004 		return false;
1005 	}
1006 
1007 	if (crtc_state->crc_enabled) {
1008 		drm_dbg_kms(&dev_priv->drm,
1009 			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1010 		return false;
1011 	}
1012 
1013 	if (DISPLAY_VER(dev_priv) >= 12) {
1014 		psr_max_h = 5120;
1015 		psr_max_v = 3200;
1016 		max_bpp = 30;
1017 	} else if (DISPLAY_VER(dev_priv) >= 10) {
1018 		psr_max_h = 4096;
1019 		psr_max_v = 2304;
1020 		max_bpp = 24;
1021 	} else if (DISPLAY_VER(dev_priv) == 9) {
1022 		psr_max_h = 3640;
1023 		psr_max_v = 2304;
1024 		max_bpp = 24;
1025 	}
1026 
1027 	if (crtc_state->pipe_bpp > max_bpp) {
1028 		drm_dbg_kms(&dev_priv->drm,
1029 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1030 			    crtc_state->pipe_bpp, max_bpp);
1031 		return false;
1032 	}
1033 
1034 	/* Wa_16011303918:adl-p */
1035 	if (crtc_state->vrr.enable &&
1036 	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1037 		drm_dbg_kms(&dev_priv->drm,
1038 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1039 		return false;
1040 	}
1041 
1042 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1043 		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
1045 		return false;
1046 	}
1047 
1048 	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
1049 		drm_dbg_kms(&dev_priv->drm,
1050 			    "PSR2 not enabled, Unable to use long enough wake times\n");
1051 		return false;
1052 	}
1053 
1054 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1055 	if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1056 	    crtc_state->hw.adjusted_mode.crtc_vblank_start <
1057 	    psr2_block_count_lines(intel_dp)) {
1058 		drm_dbg_kms(&dev_priv->drm,
1059 			    "PSR2 not enabled, too short vblank time\n");
1060 		return false;
1061 	}
1062 
1063 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1064 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1065 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
1066 			drm_dbg_kms(&dev_priv->drm,
1067 				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1068 			return false;
1069 		}
1070 	}
1071 
1072 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1073 		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1074 		goto unsupported;
1075 	}
1076 
1077 	if (!crtc_state->enable_psr2_sel_fetch &&
1078 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1079 		drm_dbg_kms(&dev_priv->drm,
1080 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1081 			    crtc_hdisplay, crtc_vdisplay,
1082 			    psr_max_h, psr_max_v);
1083 		goto unsupported;
1084 	}
1085 
1086 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1087 	return true;
1088 
1089 unsupported:
1090 	crtc_state->enable_psr2_sel_fetch = false;
1091 	return false;
1092 }
1093 
1094 void intel_psr_compute_config(struct intel_dp *intel_dp,
1095 			      struct intel_crtc_state *crtc_state,
1096 			      struct drm_connector_state *conn_state)
1097 {
1098 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1099 	const struct drm_display_mode *adjusted_mode =
1100 		&crtc_state->hw.adjusted_mode;
1101 	int psr_setup_time;
1102 
1103 	/*
	 * Current PSR panels don't work reliably with VRR enabled, so if
	 * VRR is enabled, do not enable PSR.
1106 	 */
1107 	if (crtc_state->vrr.enable)
1108 		return;
1109 
1110 	if (!CAN_PSR(intel_dp))
1111 		return;
1112 
1113 	if (!psr_global_enabled(intel_dp)) {
1114 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1115 		return;
1116 	}
1117 
1118 	if (intel_dp->psr.sink_not_reliable) {
1119 		drm_dbg_kms(&dev_priv->drm,
1120 			    "PSR sink implementation is not reliable\n");
1121 		return;
1122 	}
1123 
1124 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1125 		drm_dbg_kms(&dev_priv->drm,
1126 			    "PSR condition failed: Interlaced mode enabled\n");
1127 		return;
1128 	}
1129 
1130 	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1131 	if (psr_setup_time < 0) {
1132 		drm_dbg_kms(&dev_priv->drm,
1133 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1134 			    intel_dp->psr_dpcd[1]);
1135 		return;
1136 	}
1137 
1138 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1139 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1140 		drm_dbg_kms(&dev_priv->drm,
1141 			    "PSR condition failed: PSR setup time (%d us) too long\n",
1142 			    psr_setup_time);
1143 		return;
1144 	}
1145 
1146 	crtc_state->has_psr = true;
1147 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1148 
1149 	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1150 	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1151 				     &crtc_state->psr_vsc);
1152 }
1153 
1154 void intel_psr_get_config(struct intel_encoder *encoder,
1155 			  struct intel_crtc_state *pipe_config)
1156 {
1157 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1158 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1159 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1160 	struct intel_dp *intel_dp;
1161 	u32 val;
1162 
1163 	if (!dig_port)
1164 		return;
1165 
1166 	intel_dp = &dig_port->dp;
1167 	if (!CAN_PSR(intel_dp))
1168 		return;
1169 
1170 	mutex_lock(&intel_dp->psr.lock);
1171 	if (!intel_dp->psr.enabled)
1172 		goto unlock;
1173 
1174 	/*
	 * Not possible to read the EDP_PSR/PSR2_CTL registers as they get
	 * enabled/disabled because of frontbuffer tracking and other reasons.
1177 	 */
1178 	pipe_config->has_psr = true;
1179 	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1180 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1181 
1182 	if (!intel_dp->psr.psr2_enabled)
1183 		goto unlock;
1184 
1185 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1186 		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1187 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1188 			pipe_config->enable_psr2_sel_fetch = true;
1189 	}
1190 
1191 	if (DISPLAY_VER(dev_priv) >= 12) {
1192 		val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1193 		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1194 	}
1195 unlock:
1196 	mutex_unlock(&intel_dp->psr.lock);
1197 }
1198 
1199 static void intel_psr_activate(struct intel_dp *intel_dp)
1200 {
1201 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1202 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1203 
1204 	if (transcoder_has_psr2(dev_priv, cpu_transcoder))
1205 		drm_WARN_ON(&dev_priv->drm,
1206 			    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1207 
1208 	drm_WARN_ON(&dev_priv->drm,
1209 		    intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder)) & EDP_PSR_ENABLE);
1210 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1211 	lockdep_assert_held(&intel_dp->psr.lock);
1212 
	/* psr1 and psr2 are mutually exclusive. */
1214 	if (intel_dp->psr.psr2_enabled)
1215 		hsw_activate_psr2(intel_dp);
1216 	else
1217 		hsw_activate_psr1(intel_dp);
1218 
1219 	intel_dp->psr.active = true;
1220 }
1221 
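/* Per-pipe LATENCY_REPORTING_REMOVED bit used by Wa_16013835468 / Wa_14015648006. */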
1222 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1223 {
1224 	switch (intel_dp->psr.pipe) {
1225 	case PIPE_A:
1226 		return LATENCY_REPORTING_REMOVED_PIPE_A;
1227 	case PIPE_B:
1228 		return LATENCY_REPORTING_REMOVED_PIPE_B;
1229 	case PIPE_C:
1230 		return LATENCY_REPORTING_REMOVED_PIPE_C;
1231 	case PIPE_D:
1232 		return LATENCY_REPORTING_REMOVED_PIPE_D;
1233 	default:
1234 		MISSING_CASE(intel_dp->psr.pipe);
1235 		return 0;
1236 	}
1237 }
1238 
1239 /*
1240  * Wa_16013835468
1241  * Wa_14015648006
1242  */
1243 static void wm_optimization_wa(struct intel_dp *intel_dp,
1244 			       const struct intel_crtc_state *crtc_state)
1245 {
1246 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1247 	bool set_wa_bit = false;
1248 
1249 	/* Wa_14015648006 */
1250 	if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1251 	    IS_DISPLAY_VER(dev_priv, 11, 13))
1252 		set_wa_bit |= crtc_state->wm_level_disabled;
1253 
1254 	/* Wa_16013835468 */
1255 	if (DISPLAY_VER(dev_priv) == 12)
1256 		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1257 			crtc_state->hw.adjusted_mode.crtc_vdisplay;
1258 
1259 	if (set_wa_bit)
1260 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1261 			     0, wa_16013835468_bit_get(intel_dp));
1262 	else
1263 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1264 			     wa_16013835468_bit_get(intel_dp), 0);
1265 }
1266 
1267 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1268 				    const struct intel_crtc_state *crtc_state)
1269 {
1270 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1271 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1272 	u32 mask;
1273 
1274 	/*
	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other HW tracking issues, now that
	 * we can rely on frontbuffer tracking.
1279 	 */
1280 	mask = EDP_PSR_DEBUG_MASK_MEMUP |
1281 	       EDP_PSR_DEBUG_MASK_HPD |
1282 	       EDP_PSR_DEBUG_MASK_LPSP |
1283 	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1284 
1285 	if (DISPLAY_VER(dev_priv) < 11)
1286 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1287 
1288 	intel_de_write(dev_priv, EDP_PSR_DEBUG(cpu_transcoder),
1289 		       mask);
1290 
1291 	psr_irq_control(intel_dp);
1292 
1293 	/*
1294 	 * TODO: if future platforms supports DC3CO in more than one
1295 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1296 	 */
1297 	if (intel_dp->psr.dc3co_exitline)
1298 		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1299 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1300 
1301 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1302 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1303 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1304 			     IGNORE_PSR2_HW_TRACKING : 0);
1305 
1306 	/*
1307 	 * Wa_16013835468
1308 	 * Wa_14015648006
1309 	 */
1310 	wm_optimization_wa(intel_dp, crtc_state);
1311 
1312 	if (intel_dp->psr.psr2_enabled) {
1313 		if (DISPLAY_VER(dev_priv) == 9)
1314 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1315 				     PSR2_VSC_ENABLE_PROG_HEADER |
1316 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1317 
1318 		/*
1319 		 * Wa_16014451276:adlp,mtl[a0,b0]
		 * All supported adlp panels have 1-based X granularity; this may
		 * cause issues if unsupported panels are used.
1322 		 */
1323 		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1324 			intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
1325 				     ADLP_1_BASED_X_GRANULARITY);
1326 		else if (IS_ALDERLAKE_P(dev_priv))
1327 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1328 				     ADLP_1_BASED_X_GRANULARITY);
1329 
1330 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1331 		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1332 			intel_de_rmw(dev_priv,
1333 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1334 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1335 		else if (IS_ALDERLAKE_P(dev_priv))
1336 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1337 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1338 	}
1339 }
1340 
1341 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1342 {
1343 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1344 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1345 	u32 val;
1346 
1347 	/*
1348 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1349 	 * will still keep the error set even after the reset done in the
1350 	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the
	 * first time the PSR HW tries to activate, so let's keep PSR disabled
	 * to avoid any rendering problems.
1354 	 */
1355 	if (DISPLAY_VER(dev_priv) >= 12)
1356 		val = intel_de_read(dev_priv, TRANS_PSR_IIR(cpu_transcoder));
1357 	else
1358 		val = intel_de_read(dev_priv, EDP_PSR_IIR);
1359 	val &= psr_irq_psr_error_bit_get(intel_dp);
1360 	if (val) {
1361 		intel_dp->psr.sink_not_reliable = true;
1362 		drm_dbg_kms(&dev_priv->drm,
1363 			    "PSR interruption error set, not enabling PSR\n");
1364 		return false;
1365 	}
1366 
1367 	return true;
1368 }
1369 
1370 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1371 				    const struct intel_crtc_state *crtc_state)
1372 {
1373 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1374 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1375 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1376 	struct intel_encoder *encoder = &dig_port->base;
1377 	u32 val;
1378 
1379 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1380 
1381 	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1382 	intel_dp->psr.busy_frontbuffer_bits = 0;
1383 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1384 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1385 	/* DC5/DC6 requires at least 6 idle frames */
1386 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1387 	intel_dp->psr.dc3co_exit_delay = val;
1388 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1389 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1390 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1391 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1392 		crtc_state->req_psr2_sdp_prior_scanline;
1393 
1394 	if (!psr_interrupt_error_check(intel_dp))
1395 		return;
1396 
1397 	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1398 		    intel_dp->psr.psr2_enabled ? "2" : "1");
1399 	intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1400 	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1401 	intel_psr_enable_sink(intel_dp);
1402 	intel_psr_enable_source(intel_dp, crtc_state);
1403 	intel_dp->psr.enabled = true;
1404 	intel_dp->psr.paused = false;
1405 
1406 	intel_psr_activate(intel_dp);
1407 }
1408 
1409 static void intel_psr_exit(struct intel_dp *intel_dp)
1410 {
1411 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1412 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1413 	u32 val;
1414 
1415 	if (!intel_dp->psr.active) {
1416 		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1417 			val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1418 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1419 		}
1420 
1421 		val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder));
1422 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1423 
1424 		return;
1425 	}
1426 
1427 	if (intel_dp->psr.psr2_enabled) {
1428 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1429 
1430 		val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1431 				   EDP_PSR2_ENABLE, 0);
1432 
1433 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1434 	} else {
1435 		val = intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder),
1436 				   EDP_PSR_ENABLE, 0);
1437 
1438 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1439 	}
1440 	intel_dp->psr.active = false;
1441 }
1442 
1443 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1444 {
1445 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1446 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1447 	i915_reg_t psr_status;
1448 	u32 psr_status_mask;
1449 
1450 	if (intel_dp->psr.psr2_enabled) {
1451 		psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1452 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1453 	} else {
1454 		psr_status = EDP_PSR_STATUS(cpu_transcoder);
1455 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1456 	}
1457 
1458 	/* Wait till PSR is idle */
1459 	if (intel_de_wait_for_clear(dev_priv, psr_status,
1460 				    psr_status_mask, 2000))
1461 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1462 }
1463 
1464 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1465 {
1466 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1467 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1468 	enum phy phy = intel_port_to_phy(dev_priv,
1469 					 dp_to_dig_port(intel_dp)->base.port);
1470 
1471 	lockdep_assert_held(&intel_dp->psr.lock);
1472 
1473 	if (!intel_dp->psr.enabled)
1474 		return;
1475 
1476 	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1477 		    intel_dp->psr.psr2_enabled ? "2" : "1");
1478 
1479 	intel_psr_exit(intel_dp);
1480 	intel_psr_wait_exit_locked(intel_dp);
1481 
1482 	/*
1483 	 * Wa_16013835468
1484 	 * Wa_14015648006
1485 	 */
1486 	if (DISPLAY_VER(dev_priv) >= 11)
1487 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1488 			     wa_16013835468_bit_get(intel_dp), 0);
1489 
1490 	if (intel_dp->psr.psr2_enabled) {
1491 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1492 		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1493 			intel_de_rmw(dev_priv,
1494 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1495 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1496 		else if (IS_ALDERLAKE_P(dev_priv))
1497 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1498 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1499 	}
1500 
1501 	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1502 
1503 	/* Disable PSR on Sink */
1504 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1505 
1506 	if (intel_dp->psr.psr2_enabled)
1507 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1508 
1509 	intel_dp->psr.enabled = false;
1510 	intel_dp->psr.psr2_enabled = false;
1511 	intel_dp->psr.psr2_sel_fetch_enabled = false;
1512 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1513 }
1514 
1515 /**
1516  * intel_psr_disable - Disable PSR
1517  * @intel_dp: Intel DP
1518  * @old_crtc_state: old CRTC state
1519  *
1520  * This function needs to be called before disabling pipe.
1521  */
1522 void intel_psr_disable(struct intel_dp *intel_dp,
1523 		       const struct intel_crtc_state *old_crtc_state)
1524 {
1525 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1526 
1527 	if (!old_crtc_state->has_psr)
1528 		return;
1529 
1530 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1531 		return;
1532 
1533 	mutex_lock(&intel_dp->psr.lock);
1534 
1535 	intel_psr_disable_locked(intel_dp);
1536 
1537 	mutex_unlock(&intel_dp->psr.lock);
1538 	cancel_work_sync(&intel_dp->psr.work);
1539 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1540 }
1541 
1542 /**
1543  * intel_psr_pause - Pause PSR
1544  * @intel_dp: Intel DP
1545  *
 * This function needs to be called after enabling PSR.
1547  */
1548 void intel_psr_pause(struct intel_dp *intel_dp)
1549 {
1550 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1551 	struct intel_psr *psr = &intel_dp->psr;
1552 
1553 	if (!CAN_PSR(intel_dp))
1554 		return;
1555 
1556 	mutex_lock(&psr->lock);
1557 
1558 	if (!psr->enabled) {
1559 		mutex_unlock(&psr->lock);
1560 		return;
1561 	}
1562 
1563 	/* If we ever hit this, we will need to add refcount to pause/resume */
1564 	drm_WARN_ON(&dev_priv->drm, psr->paused);
1565 
1566 	intel_psr_exit(intel_dp);
1567 	intel_psr_wait_exit_locked(intel_dp);
1568 	psr->paused = true;
1569 
1570 	mutex_unlock(&psr->lock);
1571 
1572 	cancel_work_sync(&psr->work);
1573 	cancel_delayed_work_sync(&psr->dc3co_work);
1574 }
1575 
1576 /**
1577  * intel_psr_resume - Resume PSR
1578  * @intel_dp: Intel DP
1579  *
 * This function needs to be called after pausing PSR.
1581  */
1582 void intel_psr_resume(struct intel_dp *intel_dp)
1583 {
1584 	struct intel_psr *psr = &intel_dp->psr;
1585 
1586 	if (!CAN_PSR(intel_dp))
1587 		return;
1588 
1589 	mutex_lock(&psr->lock);
1590 
1591 	if (!psr->paused)
1592 		goto unlock;
1593 
1594 	psr->paused = false;
1595 	intel_psr_activate(intel_dp);
1596 
1597 unlock:
1598 	mutex_unlock(&psr->lock);
1599 }
1600 
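/*
 * PSR2_MAN_TRK_CTL bit definitions differ between platforms: ADL-P and
 * display 14+ use the ADLP_* variants and do not program a dedicated
 * enable bit (the helper below returns 0 for them), while older
 * platforms need PSR2_MAN_TRK_CTL_ENABLE set.
 */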
1601 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1602 {
1603 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1604 		PSR2_MAN_TRK_CTL_ENABLE;
1605 }
1606 
1607 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1608 {
1609 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1610 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1611 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1612 }
1613 
1614 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1615 {
1616 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1617 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1618 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1619 }
1620 
1621 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1622 {
1623 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1624 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1625 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1626 }
1627 
1628 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1629 {
1630 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1631 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1632 
1633 	if (intel_dp->psr.psr2_sel_fetch_enabled)
1634 		intel_de_write(dev_priv,
1635 			       PSR2_MAN_TRK_CTL(cpu_transcoder),
1636 			       man_trk_ctl_enable_bit_get(dev_priv) |
1637 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
1638 			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1639 			       man_trk_ctl_continuos_full_frame(dev_priv));
1640 
1641 	/*
1642 	 * Display WA #0884: skl+
1643 	 * This documented WA for bxt can be safely applied
1644 	 * broadly so we can force HW tracking to exit PSR
1645 	 * instead of disabling and re-enabling.
1646 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1647 	 * but it makes more sense to write to the currently
1648 	 * active pipe.
1649 	 *
1650 	 * This workaround does not exist for platforms with display 10 or
1651 	 * newer, but testing proved that it works up to display 13; for
1652 	 * anything newer further testing will be needed.
1653 	 */
1654 	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1655 }
1656 
1657 void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
1658 					    const struct intel_crtc_state *crtc_state)
1659 {
1660 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1661 	enum pipe pipe = plane->pipe;
1662 
1663 	if (!crtc_state->enable_psr2_sel_fetch)
1664 		return;
1665 
1666 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1667 }
1668 
1669 void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
1670 					    const struct intel_crtc_state *crtc_state,
1671 					    const struct intel_plane_state *plane_state)
1672 {
1673 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
1674 	enum pipe pipe = plane->pipe;
1675 
1676 	if (!crtc_state->enable_psr2_sel_fetch)
1677 		return;
1678 
1679 	if (plane->id == PLANE_CURSOR)
1680 		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1681 				  plane_state->ctl);
1682 	else
1683 		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1684 				  PLANE_SEL_FETCH_CTL_ENABLE);
1685 }
1686 
1687 void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
1688 					      const struct intel_crtc_state *crtc_state,
1689 					      const struct intel_plane_state *plane_state,
1690 					      int color_plane)
1691 {
1692 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1693 	enum pipe pipe = plane->pipe;
1694 	const struct drm_rect *clip;
1695 	u32 val;
1696 	int x, y;
1697 
1698 	if (!crtc_state->enable_psr2_sel_fetch)
1699 		return;
1700 
1701 	if (plane->id == PLANE_CURSOR)
1702 		return;
1703 
1704 	clip = &plane_state->psr2_sel_fetch_area;
1705 
1706 	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1707 	val |= plane_state->uapi.dst.x1;
1708 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1709 
1710 	x = plane_state->view.color_plane[color_plane].x;
1711 
1712 	/*
1713 	 * From Bspec: UV surface Start Y Position = half of Y plane Y
1714 	 * start position.
1715 	 */
1716 	if (!color_plane)
1717 		y = plane_state->view.color_plane[color_plane].y + clip->y1;
1718 	else
1719 		y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1720 
1721 	val = y << 16 | x;
1722 
1723 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1724 			  val);
1725 
1726 	/* Sizes are 0 based */
1727 	val = (drm_rect_height(clip) - 1) << 16;
1728 	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1729 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1730 }
1731 
1732 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1733 {
1734 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1735 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1736 	struct intel_encoder *encoder;
1737 
1738 	if (!crtc_state->enable_psr2_sel_fetch)
1739 		return;
1740 
1741 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1742 					     crtc_state->uapi.encoder_mask) {
1743 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1744 
1745 		lockdep_assert_held(&intel_dp->psr.lock);
1746 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1747 			return;
1748 		break;
1749 	}
1750 
1751 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1752 		       crtc_state->psr2_man_track_ctl);
1753 }
1754 
1755 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1756 				  struct drm_rect *clip, bool full_update)
1757 {
1758 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1759 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1760 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1761 
1762 	/* SF partial frame enable has to be set even on full update */
1763 	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1764 
1765 	if (full_update) {
1766 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1767 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
1768 		goto exit;
1769 	}
1770 
1771 	if (clip->y1 == -1)
1772 		goto exit;
1773 
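	/*
	 * ADL-P and display 14+ take the SU region start/end scanlines
	 * directly (the end is inclusive, hence clip->y2 - 1), while older
	 * platforms program the region as 1-based blocks of 4 lines, hence
	 * the 4-line alignment check and the divide by 4.
	 */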
1774 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1775 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1776 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1777 	} else {
1778 		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1779 
1780 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1781 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1782 	}
1783 exit:
1784 	crtc_state->psr2_man_track_ctl = val;
1785 }
1786 
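/*
 * Merge @damage_area into @overlap_damage_area, after clipping it to
 * @pipe_src. An overlap_damage_area->y1 of -1 means nothing has been
 * accumulated yet.
 */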
1787 static void clip_area_update(struct drm_rect *overlap_damage_area,
1788 			     struct drm_rect *damage_area,
1789 			     struct drm_rect *pipe_src)
1790 {
1791 	if (!drm_rect_intersect(damage_area, pipe_src))
1792 		return;
1793 
1794 	if (overlap_damage_area->y1 == -1) {
1795 		overlap_damage_area->y1 = damage_area->y1;
1796 		overlap_damage_area->y2 = damage_area->y2;
1797 		return;
1798 	}
1799 
1800 	if (damage_area->y1 < overlap_damage_area->y1)
1801 		overlap_damage_area->y1 = damage_area->y1;
1802 
1803 	if (damage_area->y2 > overlap_damage_area->y2)
1804 		overlap_damage_area->y2 = damage_area->y2;
1805 }
1806 
1807 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1808 						struct drm_rect *pipe_clip)
1809 {
1810 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1811 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1812 	u16 y_alignment;
1813 
1814 	/* ADL-P and display 14+ align the SU region to the VDSC slice height when DSC is enabled */
1815 	if (crtc_state->dsc.compression_enable &&
1816 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
1817 		y_alignment = vdsc_cfg->slice_height;
1818 	else
1819 		y_alignment = crtc_state->su_y_granularity;
1820 
1821 	pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1822 	if (pipe_clip->y2 % y_alignment)
1823 		pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
1824 }
1825 
1826 /*
1827  * TODO: It is not clear how to handle planes with a negative position;
1828  * planes are also not updated if they have a negative X
1829  * position, so for now do a full update in these cases.
1830  *
1831  * Plane scaling and rotation are not supported by selective fetch and both
1832  * properties can change without a modeset, so they need to be checked at
1833  * every atomic commit.
1834  */
1835 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1836 {
1837 	if (plane_state->uapi.dst.y1 < 0 ||
1838 	    plane_state->uapi.dst.x1 < 0 ||
1839 	    plane_state->scaler_id >= 0 ||
1840 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1841 		return false;
1842 
1843 	return true;
1844 }
1845 
1846 /*
1847  * Check for pipe properties that are not supported by selective fetch.
1848  *
1849  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
1850  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
1851  * enabled and going to the full update path.
1852  */
1853 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1854 {
1855 	if (crtc_state->scaler_state.scaler_id >= 0)
1856 		return false;
1857 
1858 	return true;
1859 }
1860 
1861 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1862 				struct intel_crtc *crtc)
1863 {
1864 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1865 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1866 	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1867 	struct intel_plane_state *new_plane_state, *old_plane_state;
1868 	struct intel_plane *plane;
1869 	bool full_update = false;
1870 	int i, ret;
1871 
1872 	if (!crtc_state->enable_psr2_sel_fetch)
1873 		return 0;
1874 
1875 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
1876 		full_update = true;
1877 		goto skip_sel_fetch_set_loop;
1878 	}
1879 
1880 	/*
1881 	 * Calculate the minimal selective fetch area of each plane and the
1882 	 * resulting pipe damaged area.
1883 	 * In the next loop the plane selective fetch area will actually be set
1884 	 * using the whole pipe damaged area.
1885 	 */
1886 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1887 					     new_plane_state, i) {
1888 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
1889 						      .x2 = INT_MAX };
1890 
1891 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
1892 			continue;
1893 
1894 		if (!new_plane_state->uapi.visible &&
1895 		    !old_plane_state->uapi.visible)
1896 			continue;
1897 
1898 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
1899 			full_update = true;
1900 			break;
1901 		}
1902 
1903 		/*
1904 		 * If the visibility changed or the plane moved, mark the whole
1905 		 * plane area as damaged, as it needs to be completely redrawn
1906 		 * in both the old and new positions.
1907 		 */
1908 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
1909 		    !drm_rect_equals(&new_plane_state->uapi.dst,
1910 				     &old_plane_state->uapi.dst)) {
1911 			if (old_plane_state->uapi.visible) {
1912 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
1913 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
1914 				clip_area_update(&pipe_clip, &damaged_area,
1915 						 &crtc_state->pipe_src);
1916 			}
1917 
1918 			if (new_plane_state->uapi.visible) {
1919 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
1920 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
1921 				clip_area_update(&pipe_clip, &damaged_area,
1922 						 &crtc_state->pipe_src);
1923 			}
1924 			continue;
1925 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
1926 			/* If alpha changed mark the whole plane area as damaged */
1927 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
1928 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
1929 			clip_area_update(&pipe_clip, &damaged_area,
1930 					 &crtc_state->pipe_src);
1931 			continue;
1932 		}
1933 
1934 		src = drm_plane_state_src(&new_plane_state->uapi);
1935 		drm_rect_fp_to_int(&src, &src);
1936 
1937 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
1938 						     &new_plane_state->uapi, &damaged_area))
1939 			continue;
1940 
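		/*
		 * Translate the merged damage rectangle from framebuffer (src)
		 * coordinates into pipe coordinates using the plane dst/src
		 * offsets.
		 */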
1941 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
1942 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
1943 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
1944 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
1945 
1946 		clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
1947 	}
1948 
1949 	/*
1950 	 * TODO: For now we are just using a full update in case
1951 	 * selective fetch area calculation fails. To optimize this we
1952 	 * should identify cases where this happens and fix the area
1953 	 * calculation for those.
1954 	 */
1955 	if (pipe_clip.y1 == -1) {
1956 		drm_info_once(&dev_priv->drm,
1957 			      "Selective fetch area calculation failed in pipe %c\n",
1958 			      pipe_name(crtc->pipe));
1959 		full_update = true;
1960 	}
1961 
1962 	if (full_update)
1963 		goto skip_sel_fetch_set_loop;
1964 
1965 	/* Wa_14014971492 */
1966 	if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1967 	     IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
1968 	    crtc_state->splitter.enable)
1969 		pipe_clip.y1 = 0;
1970 
1971 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
1972 	if (ret)
1973 		return ret;
1974 
1975 	intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
1976 
1977 	/*
1978 	 * Now that we have the pipe damaged area, check if it intersects
1979 	 * with each plane; if it does, set the plane selective fetch area.
1980 	 */
1981 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1982 					     new_plane_state, i) {
1983 		struct drm_rect *sel_fetch_area, inter;
1984 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
1985 
1986 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
1987 		    !new_plane_state->uapi.visible)
1988 			continue;
1989 
1990 		inter = pipe_clip;
1991 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
1992 			continue;
1993 
1994 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
1995 			full_update = true;
1996 			break;
1997 		}
1998 
1999 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2000 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2001 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2002 		crtc_state->update_planes |= BIT(plane->id);
2003 
2004 		/*
2005 		 * The sel_fetch_area is calculated for the UV plane. Use the
2006 		 * same area for the Y plane as well.
2007 		 */
2008 		if (linked) {
2009 			struct intel_plane_state *linked_new_plane_state;
2010 			struct drm_rect *linked_sel_fetch_area;
2011 
2012 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2013 			if (IS_ERR(linked_new_plane_state))
2014 				return PTR_ERR(linked_new_plane_state);
2015 
2016 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2017 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2018 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2019 			crtc_state->update_planes |= BIT(linked->id);
2020 		}
2021 	}
2022 
2023 skip_sel_fetch_set_loop:
2024 	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2025 	return 0;
2026 }
2027 
2028 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2029 				struct intel_crtc *crtc)
2030 {
2031 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2032 	const struct intel_crtc_state *old_crtc_state =
2033 		intel_atomic_get_old_crtc_state(state, crtc);
2034 	const struct intel_crtc_state *new_crtc_state =
2035 		intel_atomic_get_new_crtc_state(state, crtc);
2036 	struct intel_encoder *encoder;
2037 
2038 	if (!HAS_PSR(i915))
2039 		return;
2040 
2041 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2042 					     old_crtc_state->uapi.encoder_mask) {
2043 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2044 		struct intel_psr *psr = &intel_dp->psr;
2045 		bool needs_to_disable = false;
2046 
2047 		mutex_lock(&psr->lock);
2048 
2049 		/*
2050 		 * Reasons to disable:
2051 		 * - PSR disabled in new state
2052 		 * - All planes will go inactive
2053 		 * - Changing between PSR versions
2054 		 * - Display WA #1136: skl, bxt
2055 		 */
2056 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2057 		needs_to_disable |= !new_crtc_state->has_psr;
2058 		needs_to_disable |= !new_crtc_state->active_planes;
2059 		needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2060 		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2061 			new_crtc_state->wm_level_disabled;
2062 
2063 		if (psr->enabled && needs_to_disable)
2064 			intel_psr_disable_locked(intel_dp);
2065 		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2066 			/* Wa_14015648006 */
2067 			wm_optimization_wa(intel_dp, new_crtc_state);
2068 
2069 		mutex_unlock(&psr->lock);
2070 	}
2071 }
2072 
2073 static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
2074 					 const struct intel_crtc_state *crtc_state)
2075 {
2076 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2077 	struct intel_encoder *encoder;
2078 
2079 	if (!crtc_state->has_psr)
2080 		return;
2081 
2082 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2083 					     crtc_state->uapi.encoder_mask) {
2084 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2085 		struct intel_psr *psr = &intel_dp->psr;
2086 		bool keep_disabled = false;
2087 
2088 		mutex_lock(&psr->lock);
2089 
2090 		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2091 
2092 		keep_disabled |= psr->sink_not_reliable;
2093 		keep_disabled |= !crtc_state->active_planes;
2094 
2095 		/* Display WA #1136: skl, bxt */
2096 		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2097 			crtc_state->wm_level_disabled;
2098 
2099 		if (!psr->enabled && !keep_disabled)
2100 			intel_psr_enable_locked(intel_dp, crtc_state);
2101 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2102 			/* Wa_14015648006 */
2103 			wm_optimization_wa(intel_dp, crtc_state);
2104 
2105 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2106 		if (crtc_state->crc_enabled && psr->enabled)
2107 			psr_force_hw_tracking_exit(intel_dp);
2108 
2109 		mutex_unlock(&psr->lock);
2110 	}
2111 }
2112 
2113 void intel_psr_post_plane_update(const struct intel_atomic_state *state)
2114 {
2115 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2116 	struct intel_crtc_state *crtc_state;
2117 	struct intel_crtc *crtc;
2118 	int i;
2119 
2120 	if (!HAS_PSR(dev_priv))
2121 		return;
2122 
2123 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
2124 		_intel_psr_post_plane_update(state, crtc_state);
2125 }
2126 
2127 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2128 {
2129 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2130 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2131 
2132 	/*
2133 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2134 	 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2135 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2136 	 */
2137 	return intel_de_wait_for_clear(dev_priv,
2138 				       EDP_PSR2_STATUS(cpu_transcoder),
2139 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2140 }
2141 
2142 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2143 {
2144 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2145 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2146 
2147 	/*
2148 	 * From bspec: Panel Self Refresh (BDW+)
2149 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2150 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2151 	 * defensive enough to cover everything.
2152 	 */
2153 	return intel_de_wait_for_clear(dev_priv,
2154 				       EDP_PSR_STATUS(cpu_transcoder),
2155 				       EDP_PSR_STATUS_STATE_MASK, 50);
2156 }
2157 
2158 /**
2159  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2160  * @new_crtc_state: new CRTC state
2161  *
2162  * This function is expected to be called from pipe_update_start() where it is
2163  * not expected to race with PSR enable or disable.
2164  */
2165 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2166 {
2167 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2168 	struct intel_encoder *encoder;
2169 
2170 	if (!new_crtc_state->has_psr)
2171 		return;
2172 
2173 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2174 					     new_crtc_state->uapi.encoder_mask) {
2175 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2176 		int ret;
2177 
2178 		lockdep_assert_held(&intel_dp->psr.lock);
2179 
2180 		if (!intel_dp->psr.enabled)
2181 			continue;
2182 
2183 		if (intel_dp->psr.psr2_enabled)
2184 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2185 		else
2186 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2187 
2188 		if (ret)
2189 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2190 	}
2191 }
2192 
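/*
 * Wait for the hardware to report PSR idle, dropping psr.lock while
 * waiting. Returns true only if the wait succeeded and PSR is still
 * enabled afterwards, i.e. it is safe to re-activate PSR.
 */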
2193 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2194 {
2195 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2196 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2197 	i915_reg_t reg;
2198 	u32 mask;
2199 	int err;
2200 
2201 	if (!intel_dp->psr.enabled)
2202 		return false;
2203 
2204 	if (intel_dp->psr.psr2_enabled) {
2205 		reg = EDP_PSR2_STATUS(cpu_transcoder);
2206 		mask = EDP_PSR2_STATUS_STATE_MASK;
2207 	} else {
2208 		reg = EDP_PSR_STATUS(cpu_transcoder);
2209 		mask = EDP_PSR_STATUS_STATE_MASK;
2210 	}
2211 
2212 	mutex_unlock(&intel_dp->psr.lock);
2213 
2214 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2215 	if (err)
2216 		drm_err(&dev_priv->drm,
2217 			"Timed out waiting for PSR Idle for re-enable\n");
2218 
2219 	/* After the unlocked wait, verify that PSR is still wanted! */
2220 	mutex_lock(&intel_dp->psr.lock);
2221 	return err == 0 && intel_dp->psr.enabled;
2222 }
2223 
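/*
 * Build and commit an atomic state that marks the CRTC mode of every
 * active eDP connector as changed, so that a PSR debug mode change
 * actually takes effect on the hardware.
 */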
2224 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2225 {
2226 	struct drm_connector_list_iter conn_iter;
2227 	struct drm_modeset_acquire_ctx ctx;
2228 	struct drm_atomic_state *state;
2229 	struct drm_connector *conn;
2230 	int err = 0;
2231 
2232 	state = drm_atomic_state_alloc(&dev_priv->drm);
2233 	if (!state)
2234 		return -ENOMEM;
2235 
2236 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2237 
2238 	state->acquire_ctx = &ctx;
2239 	to_intel_atomic_state(state)->internal = true;
2240 
2241 retry:
2242 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2243 	drm_for_each_connector_iter(conn, &conn_iter) {
2244 		struct drm_connector_state *conn_state;
2245 		struct drm_crtc_state *crtc_state;
2246 
2247 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2248 			continue;
2249 
2250 		conn_state = drm_atomic_get_connector_state(state, conn);
2251 		if (IS_ERR(conn_state)) {
2252 			err = PTR_ERR(conn_state);
2253 			break;
2254 		}
2255 
2256 		if (!conn_state->crtc)
2257 			continue;
2258 
2259 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2260 		if (IS_ERR(crtc_state)) {
2261 			err = PTR_ERR(crtc_state);
2262 			break;
2263 		}
2264 
2265 		/* Mark mode as changed to trigger a pipe->update() */
2266 		crtc_state->mode_changed = true;
2267 	}
2268 	drm_connector_list_iter_end(&conn_iter);
2269 
2270 	if (err == 0)
2271 		err = drm_atomic_commit(state);
2272 
2273 	if (err == -EDEADLK) {
2274 		drm_atomic_state_clear(state);
2275 		err = drm_modeset_backoff(&ctx);
2276 		if (!err)
2277 			goto retry;
2278 	}
2279 
2280 	drm_modeset_drop_locks(&ctx);
2281 	drm_modeset_acquire_fini(&ctx);
2282 	drm_atomic_state_put(state);
2283 
2284 	return err;
2285 }
2286 
2287 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2288 {
2289 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2290 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2291 	u32 old_mode;
2292 	int ret;
2293 
2294 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2295 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2296 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2297 		return -EINVAL;
2298 	}
2299 
2300 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2301 	if (ret)
2302 		return ret;
2303 
2304 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2305 	intel_dp->psr.debug = val;
2306 
2307 	/*
2308 	 * Do it right away if it's already enabled, otherwise it will be done
2309 	 * when enabling the source.
2310 	 */
2311 	if (intel_dp->psr.enabled)
2312 		psr_irq_control(intel_dp);
2313 
2314 	mutex_unlock(&intel_dp->psr.lock);
2315 
2316 	if (old_mode != mode)
2317 		ret = intel_psr_fastset_force(dev_priv);
2318 
2319 	return ret;
2320 }
2321 
2322 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2323 {
2324 	struct intel_psr *psr = &intel_dp->psr;
2325 
2326 	intel_psr_disable_locked(intel_dp);
2327 	psr->sink_not_reliable = true;
2328 	/* let's make sure that the sink is awake */
2329 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2330 }
2331 
2332 static void intel_psr_work(struct work_struct *work)
2333 {
2334 	struct intel_dp *intel_dp =
2335 		container_of(work, typeof(*intel_dp), psr.work);
2336 
2337 	mutex_lock(&intel_dp->psr.lock);
2338 
2339 	if (!intel_dp->psr.enabled)
2340 		goto unlock;
2341 
2342 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2343 		intel_psr_handle_irq(intel_dp);
2344 
2345 	/*
2346 	 * We have to make sure PSR is ready for re-enable,
2347 	 * otherwise it stays disabled until the next full enable/disable cycle.
2348 	 * PSR might take some time to get fully disabled
2349 	 * and be ready for re-enable.
2350 	 */
2351 	if (!__psr_wait_for_idle_locked(intel_dp))
2352 		goto unlock;
2353 
2354 	/*
2355 	 * The delayed work can race with an invalidate hence we need to
2356 	 * recheck. Since psr_flush first clears this and then reschedules we
2357 	 * won't ever miss a flush when bailing out here.
2358 	 */
2359 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2360 		goto unlock;
2361 
2362 	intel_psr_activate(intel_dp);
2363 unlock:
2364 	mutex_unlock(&intel_dp->psr.lock);
2365 }
2366 
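/*
 * Handle a frontbuffer invalidate: with selective fetch, switch the
 * hardware to continuous full frame fetches until the matching flush;
 * otherwise simply exit PSR.
 */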
2367 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2368 {
2369 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2370 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2371 
2372 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2373 		u32 val;
2374 
2375 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2376 			/* Send one update, otherwise lag is observed on screen */
2377 			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2378 			return;
2379 		}
2380 
2381 		val = man_trk_ctl_enable_bit_get(dev_priv) |
2382 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
2383 		      man_trk_ctl_continuos_full_frame(dev_priv);
2384 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2385 		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2386 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2387 	} else {
2388 		intel_psr_exit(intel_dp);
2389 	}
2390 }
2391 
2392 /**
2393  * intel_psr_invalidate - Invalidate PSR
2394  * @dev_priv: i915 device
2395  * @frontbuffer_bits: frontbuffer plane tracking bits
2396  * @origin: which operation caused the invalidate
2397  *
2398  * Since the hardware frontbuffer tracking has gaps we need to integrate
2399  * with the software frontbuffer tracking. This function gets called every
2400  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2401  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2402  *
2403  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2404  */
2405 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2406 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
2407 {
2408 	struct intel_encoder *encoder;
2409 
2410 	if (origin == ORIGIN_FLIP)
2411 		return;
2412 
2413 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2414 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2415 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2416 
2417 		mutex_lock(&intel_dp->psr.lock);
2418 		if (!intel_dp->psr.enabled) {
2419 			mutex_unlock(&intel_dp->psr.lock);
2420 			continue;
2421 		}
2422 
2423 		pipe_frontbuffer_bits &=
2424 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2425 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2426 
2427 		if (pipe_frontbuffer_bits)
2428 			_psr_invalidate_handle(intel_dp);
2429 
2430 		mutex_unlock(&intel_dp->psr.lock);
2431 	}
2432 }
2433 /*
2434  * Once we completely rely on PSR2 S/W tracking in the future,
2435  * intel_psr_flush() will also invalidate and flush PSR for ORIGIN_FLIP
2436  * events, therefore tgl_dc3co_flush_locked() will need to be changed
2437  * accordingly.
2438  */
2439 static void
2440 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2441 		       enum fb_op_origin origin)
2442 {
2443 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2444 
2445 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2446 	    !intel_dp->psr.active)
2447 		return;
2448 
2449 	/*
2450 	 * At every frontbuffer flush/flip event, modify the delay of the delayed
2451 	 * work; when the delayed work finally runs, the display has been idle.
2452 	 */
2453 	if (!(frontbuffer_bits &
2454 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2455 		return;
2456 
2457 	tgl_psr2_enable_dc3co(intel_dp);
2458 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2459 			 intel_dp->psr.dc3co_exit_delay);
2460 }
2461 
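/*
 * Handle a frontbuffer flush: either drop back from continuous full
 * frame fetches to selective updates, force hardware tracking to exit
 * PSR with a single full frame, or schedule the work that re-activates
 * PSR once the hardware reports idle.
 */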
2462 static void _psr_flush_handle(struct intel_dp *intel_dp)
2463 {
2464 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2465 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2466 
2467 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2468 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2469 			/* can we turn CFF off? */
2470 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2471 				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2472 					man_trk_ctl_partial_frame_bit_get(dev_priv) |
2473 					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2474 					man_trk_ctl_continuos_full_frame(dev_priv);
2475 
2476 				/*
2477 				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2478 				 * updates. Still keep the CFF bit enabled as we don't have a
2479 				 * proper SU configuration in case an update is sent for any
2480 				 * reason after the SFF bit gets cleared by the HW on the next vblank.
2481 				 */
2482 				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2483 					       val);
2484 				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2485 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2486 			}
2487 		} else {
2488 			/*
2489 			 * continuous full frame is disabled, only a single full
2490 			 * frame is required
2491 			 */
2492 			psr_force_hw_tracking_exit(intel_dp);
2493 		}
2494 	} else {
2495 		psr_force_hw_tracking_exit(intel_dp);
2496 
2497 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2498 			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2499 	}
2500 }
2501 
2502 /**
2503  * intel_psr_flush - Flush PSR
2504  * @dev_priv: i915 device
2505  * @frontbuffer_bits: frontbuffer plane tracking bits
2506  * @origin: which operation caused the flush
2507  *
2508  * Since the hardware frontbuffer tracking has gaps we need to integrate
2509  * with the software frontbuffer tracking. This function gets called every
2510  * time frontbuffer rendering has completed and flushed out to memory. PSR
2511  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2512  *
2513  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2514  */
2515 void intel_psr_flush(struct drm_i915_private *dev_priv,
2516 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
2517 {
2518 	struct intel_encoder *encoder;
2519 
2520 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2521 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2522 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2523 
2524 		mutex_lock(&intel_dp->psr.lock);
2525 		if (!intel_dp->psr.enabled) {
2526 			mutex_unlock(&intel_dp->psr.lock);
2527 			continue;
2528 		}
2529 
2530 		pipe_frontbuffer_bits &=
2531 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2532 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2533 
2534 		/*
2535 		 * If PSR is paused by an explicit intel_psr_pause() call,
2536 		 * we have to ensure that the PSR is not activated until
2537 		 * intel_psr_resume() is called.
2538 		 */
2539 		if (intel_dp->psr.paused)
2540 			goto unlock;
2541 
2542 		if (origin == ORIGIN_FLIP ||
2543 		    (origin == ORIGIN_CURSOR_UPDATE &&
2544 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
2545 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2546 			goto unlock;
2547 		}
2548 
2549 		if (pipe_frontbuffer_bits == 0)
2550 			goto unlock;
2551 
2552 		/* By definition flush = invalidate + flush */
2553 		_psr_flush_handle(intel_dp);
2554 unlock:
2555 		mutex_unlock(&intel_dp->psr.lock);
2556 	}
2557 }
2558 
2559 /**
2560  * intel_psr_init - Init basic PSR work and mutex.
2561  * @intel_dp: Intel DP
2562  *
2563  * This function is called after initializing the connector
2564  * (connector initialization handles the connector capabilities)
2565  * and it initializes the basic PSR state for each DP encoder.
2566  */
2567 void intel_psr_init(struct intel_dp *intel_dp)
2568 {
2569 	struct intel_connector *connector = intel_dp->attached_connector;
2570 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2571 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2572 
2573 	if (!HAS_PSR(dev_priv))
2574 		return;
2575 
2576 	/*
2577 	 * HSW spec explicitly says PSR is tied to port A.
2578 	 * BDW+ platforms have an instance of PSR registers per transcoder, but
2579 	 * BDW, GEN9 and GEN11 are not validated by the HW team on any
2580 	 * transcoder other than the eDP one.
2581 	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
2582 	 * so let's keep it hardcoded to PORT_A for those.
2583 	 * GEN12 and newer support an instance of PSR registers per transcoder.
2584 	 */
2585 	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2586 		drm_dbg_kms(&dev_priv->drm,
2587 			    "PSR condition failed: Port not supported\n");
2588 		return;
2589 	}
2590 
2591 	intel_dp->psr.source_support = true;
2592 
2593 	/* Set link_standby vs. link_off defaults */
2594 	if (DISPLAY_VER(dev_priv) < 12)
2595 		/* For platforms before TGL let's respect the VBT again */
2596 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2597 
2598 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2599 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2600 	mutex_init(&intel_dp->psr.lock);
2601 }
2602 
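/*
 * Read DP_PSR_STATUS and DP_PSR_ERROR_STATUS from the sink over AUX;
 * returns 0 on success with the masked sink state in @status.
 */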
2603 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2604 					   u8 *status, u8 *error_status)
2605 {
2606 	struct drm_dp_aux *aux = &intel_dp->aux;
2607 	int ret;
2608 
2609 	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2610 	if (ret != 1)
2611 		return ret;
2612 
2613 	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2614 	if (ret != 1)
2615 		return ret;
2616 
2617 	*status = *status & DP_PSR_SINK_STATE_MASK;
2618 
2619 	return 0;
2620 }
2621 
2622 static void psr_alpm_check(struct intel_dp *intel_dp)
2623 {
2624 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2625 	struct drm_dp_aux *aux = &intel_dp->aux;
2626 	struct intel_psr *psr = &intel_dp->psr;
2627 	u8 val;
2628 	int r;
2629 
2630 	if (!psr->psr2_enabled)
2631 		return;
2632 
2633 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2634 	if (r != 1) {
2635 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2636 		return;
2637 	}
2638 
2639 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2640 		intel_psr_disable_locked(intel_dp);
2641 		psr->sink_not_reliable = true;
2642 		drm_dbg_kms(&dev_priv->drm,
2643 			    "ALPM lock timeout error, disabling PSR\n");
2644 
2645 		/* Clearing error */
2646 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2647 	}
2648 }
2649 
2650 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2651 {
2652 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2653 	struct intel_psr *psr = &intel_dp->psr;
2654 	u8 val;
2655 	int r;
2656 
2657 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2658 	if (r != 1) {
2659 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2660 		return;
2661 	}
2662 
2663 	if (val & DP_PSR_CAPS_CHANGE) {
2664 		intel_psr_disable_locked(intel_dp);
2665 		psr->sink_not_reliable = true;
2666 		drm_dbg_kms(&dev_priv->drm,
2667 			    "Sink PSR capability changed, disabling PSR\n");
2668 
2669 		/* Clearing it */
2670 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2671 	}
2672 }
2673 
2674 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2675 {
2676 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2677 	struct intel_psr *psr = &intel_dp->psr;
2678 	u8 status, error_status;
2679 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2680 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2681 			  DP_PSR_LINK_CRC_ERROR;
2682 
2683 	if (!CAN_PSR(intel_dp))
2684 		return;
2685 
2686 	mutex_lock(&psr->lock);
2687 
2688 	if (!psr->enabled)
2689 		goto exit;
2690 
2691 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2692 		drm_err(&dev_priv->drm,
2693 			"Error reading PSR status or error status\n");
2694 		goto exit;
2695 	}
2696 
2697 	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2698 		intel_psr_disable_locked(intel_dp);
2699 		psr->sink_not_reliable = true;
2700 	}
2701 
2702 	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2703 		drm_dbg_kms(&dev_priv->drm,
2704 			    "PSR sink internal error, disabling PSR\n");
2705 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2706 		drm_dbg_kms(&dev_priv->drm,
2707 			    "PSR RFB storage error, disabling PSR\n");
2708 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2709 		drm_dbg_kms(&dev_priv->drm,
2710 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
2711 	if (error_status & DP_PSR_LINK_CRC_ERROR)
2712 		drm_dbg_kms(&dev_priv->drm,
2713 			    "PSR Link CRC error, disabling PSR\n");
2714 
2715 	if (error_status & ~errors)
2716 		drm_err(&dev_priv->drm,
2717 			"PSR_ERROR_STATUS unhandled errors %x\n",
2718 			error_status & ~errors);
2719 	/* clear status register */
2720 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2721 
2722 	psr_alpm_check(intel_dp);
2723 	psr_capability_changed_check(intel_dp);
2724 
2725 exit:
2726 	mutex_unlock(&psr->lock);
2727 }
2728 
2729 bool intel_psr_enabled(struct intel_dp *intel_dp)
2730 {
2731 	bool ret;
2732 
2733 	if (!CAN_PSR(intel_dp))
2734 		return false;
2735 
2736 	mutex_lock(&intel_dp->psr.lock);
2737 	ret = intel_dp->psr.enabled;
2738 	mutex_unlock(&intel_dp->psr.lock);
2739 
2740 	return ret;
2741 }
2742 
2743 /**
2744  * intel_psr_lock - grab PSR lock
2745  * @crtc_state: the crtc state
2746  *
2747  * This is initially meant to be used around the CRTC update, when
2748  * vblank-sensitive registers are updated and we need to grab the lock
2749  * before that to avoid vblank evasion.
2750  */
2751 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2752 {
2753 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2754 	struct intel_encoder *encoder;
2755 
2756 	if (!crtc_state->has_psr)
2757 		return;
2758 
2759 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2760 					     crtc_state->uapi.encoder_mask) {
2761 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2762 
2763 		mutex_lock(&intel_dp->psr.lock);
2764 		break;
2765 	}
2766 }
2767 
2768 /**
2769  * intel_psr_unlock - release PSR lock
2770  * @crtc_state: the crtc state
2771  *
2772  * Release the PSR lock that was held during pipe update.
2773  */
2774 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2775 {
2776 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2777 	struct intel_encoder *encoder;
2778 
2779 	if (!crtc_state->has_psr)
2780 		return;
2781 
2782 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2783 					     crtc_state->uapi.encoder_mask) {
2784 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2785 
2786 		mutex_unlock(&intel_dp->psr.lock);
2787 		break;
2788 	}
2789 }
2790 
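/* Decode and print the source (hardware) PSR/PSR2 live status for debugfs */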
2791 static void
2792 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
2793 {
2794 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2795 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2796 	const char *status = "unknown";
2797 	u32 val, status_val;
2798 
2799 	if (intel_dp->psr.psr2_enabled) {
2800 		static const char * const live_status[] = {
2801 			"IDLE",
2802 			"CAPTURE",
2803 			"CAPTURE_FS",
2804 			"SLEEP",
2805 			"BUFON_FW",
2806 			"ML_UP",
2807 			"SU_STANDBY",
2808 			"FAST_SLEEP",
2809 			"DEEP_SLEEP",
2810 			"BUF_ON",
2811 			"TG_ON"
2812 		};
2813 		val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
2814 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
2815 		if (status_val < ARRAY_SIZE(live_status))
2816 			status = live_status[status_val];
2817 	} else {
2818 		static const char * const live_status[] = {
2819 			"IDLE",
2820 			"SRDONACK",
2821 			"SRDENT",
2822 			"BUFOFF",
2823 			"BUFON",
2824 			"AUXACK",
2825 			"SRDOFFACK",
2826 			"SRDENT_ON",
2827 		};
2828 		val = intel_de_read(dev_priv, EDP_PSR_STATUS(cpu_transcoder));
2829 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
2830 		if (status_val < ARRAY_SIZE(live_status))
2831 			status = live_status[status_val];
2832 	}
2833 
2834 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2835 }
2836 
2837 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
2838 {
2839 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2840 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2841 	struct intel_psr *psr = &intel_dp->psr;
2842 	intel_wakeref_t wakeref;
2843 	const char *status;
2844 	bool enabled;
2845 	u32 val;
2846 
2847 	seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
2848 	if (psr->sink_support)
2849 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
2850 	seq_puts(m, "\n");
2851 
2852 	if (!psr->sink_support)
2853 		return 0;
2854 
2855 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2856 	mutex_lock(&psr->lock);
2857 
2858 	if (psr->enabled)
2859 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2860 	else
2861 		status = "disabled";
2862 	seq_printf(m, "PSR mode: %s\n", status);
2863 
2864 	if (!psr->enabled) {
2865 		seq_printf(m, "PSR sink not reliable: %s\n",
2866 			   str_yes_no(psr->sink_not_reliable));
2867 
2868 		goto unlock;
2869 	}
2870 
2871 	if (psr->psr2_enabled) {
2872 		val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
2873 		enabled = val & EDP_PSR2_ENABLE;
2874 	} else {
2875 		val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder));
2876 		enabled = val & EDP_PSR_ENABLE;
2877 	}
2878 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2879 		   str_enabled_disabled(enabled), val);
2880 	psr_source_status(intel_dp, m);
2881 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2882 		   psr->busy_frontbuffer_bits);
2883 
2884 	/*
2885 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
2886 	 */
2887 	val = intel_de_read(dev_priv, EDP_PSR_PERF_CNT(cpu_transcoder));
2888 	seq_printf(m, "Performance counter: %u\n",
2889 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
2890 
2891 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
2892 		seq_printf(m, "Last attempted entry at: %lld\n",
2893 			   psr->last_entry_attempt);
2894 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2895 	}
2896 
2897 	if (psr->psr2_enabled) {
2898 		u32 su_frames_val[3];
2899 		int frame;
2900 
2901 		/*
2902 		 * Reading all 3 registers beforehand to minimize crossing a
2903 		 * frame boundary between register reads
2904 		 */
2905 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
2906 			val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
2907 			su_frames_val[frame / 3] = val;
2908 		}
2909 
2910 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2911 
2912 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2913 			u32 su_blocks;
2914 
2915 			su_blocks = su_frames_val[frame / 3] &
2916 				    PSR2_SU_STATUS_MASK(frame);
2917 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2918 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
2919 		}
2920 
2921 		seq_printf(m, "PSR2 selective fetch: %s\n",
2922 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
2923 	}
2924 
2925 unlock:
2926 	mutex_unlock(&psr->lock);
2927 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2928 
2929 	return 0;
2930 }
2931 
2932 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
2933 {
2934 	struct drm_i915_private *dev_priv = m->private;
2935 	struct intel_dp *intel_dp = NULL;
2936 	struct intel_encoder *encoder;
2937 
2938 	if (!HAS_PSR(dev_priv))
2939 		return -ENODEV;
2940 
2941 	/* Find the first eDP encoder which supports PSR */
2942 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2943 		intel_dp = enc_to_intel_dp(encoder);
2944 		break;
2945 	}
2946 
2947 	if (!intel_dp)
2948 		return -ENODEV;
2949 
2950 	return intel_psr_status(m, intel_dp);
2951 }
2952 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
2953 
2954 static int
2955 i915_edp_psr_debug_set(void *data, u64 val)
2956 {
2957 	struct drm_i915_private *dev_priv = data;
2958 	struct intel_encoder *encoder;
2959 	intel_wakeref_t wakeref;
2960 	int ret = -ENODEV;
2961 
2962 	if (!HAS_PSR(dev_priv))
2963 		return ret;
2964 
2965 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2966 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2967 
2968 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
2969 
2970 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2971 
2972 		// TODO: split to each transcoder's PSR debug state
2973 		ret = intel_psr_debug_set(intel_dp, val);
2974 
2975 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2976 	}
2977 
2978 	return ret;
2979 }
2980 
2981 static int
2982 i915_edp_psr_debug_get(void *data, u64 *val)
2983 {
2984 	struct drm_i915_private *dev_priv = data;
2985 	struct intel_encoder *encoder;
2986 
2987 	if (!HAS_PSR(dev_priv))
2988 		return -ENODEV;
2989 
2990 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2991 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2992 
2993 		// TODO: split to each transcoder's PSR debug state
2994 		*val = READ_ONCE(intel_dp->psr.debug);
2995 		return 0;
2996 	}
2997 
2998 	return -ENODEV;
2999 }
3000 
3001 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3002 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3003 			"%llu\n");
3004 
3005 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3006 {
3007 	struct drm_minor *minor = i915->drm.primary;
3008 
3009 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3010 			    i915, &i915_edp_psr_debug_fops);
3011 
3012 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3013 			    i915, &i915_edp_psr_status_fops);
3014 }
3015 
3016 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3017 {
3018 	struct intel_connector *connector = m->private;
3019 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3020 	static const char * const sink_status[] = {
3021 		"inactive",
3022 		"transition to active, capture and display",
3023 		"active, display from RFB",
3024 		"active, capture and display on sink device timings",
3025 		"transition to inactive, capture and display, timing re-sync",
3026 		"reserved",
3027 		"reserved",
3028 		"sink internal error",
3029 	};
3030 	const char *str;
3031 	int ret;
3032 	u8 val;
3033 
3034 	if (!CAN_PSR(intel_dp)) {
3035 		seq_puts(m, "PSR Unsupported\n");
3036 		return -ENODEV;
3037 	}
3038 
3039 	if (connector->base.status != connector_status_connected)
3040 		return -ENODEV;
3041 
3042 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
3043 	if (ret != 1)
3044 		return ret < 0 ? ret : -EIO;
3045 
3046 	val &= DP_PSR_SINK_STATE_MASK;
3047 	if (val < ARRAY_SIZE(sink_status))
3048 		str = sink_status[val];
3049 	else
3050 		str = "unknown";
3051 
3052 	seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
3053 
3054 	return 0;
3055 }
3056 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3057 
3058 static int i915_psr_status_show(struct seq_file *m, void *data)
3059 {
3060 	struct intel_connector *connector = m->private;
3061 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3062 
3063 	return intel_psr_status(m, intel_dp);
3064 }
3065 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3066 
3067 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3068 {
3069 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3070 	struct dentry *root = connector->base.debugfs_entry;
3071 
3072 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
3073 		return;
3074 
3075 	debugfs_create_file("i915_psr_sink_status", 0444, root,
3076 			    connector, &i915_psr_sink_status_fops);
3077 
3078 	if (HAS_PSR(i915))
3079 		debugfs_create_file("i915_psr_status", 0444, root,
3080 				    connector, &i915_psr_status_fops);
3081 }
3082