1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 
27 #include "i915_drv.h"
28 #include "i915_reg.h"
29 #include "intel_atomic.h"
30 #include "intel_crtc.h"
31 #include "intel_de.h"
32 #include "intel_display_types.h"
33 #include "intel_dp.h"
34 #include "intel_dp_aux.h"
35 #include "intel_hdmi.h"
36 #include "intel_psr.h"
37 #include "intel_snps_phy.h"
38 #include "skl_universal_plane.h"
39 
40 /**
41  * DOC: Panel Self Refresh (PSR/SRD)
42  *
 * Since Haswell, the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented
 * according to the PSR spec in eDP 1.3. PSR allows the display to go to
 * lower standby states when the system is idle but the display is on, as
 * it completely eliminates display refresh requests to DDR memory as long
 * as the frame buffer for that display is unchanged.
49  *
50  * Panel Self Refresh must be supported by both Hardware (source) and
51  * Panel (sink).
52  *
53  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
54  * to power down the link and memory controller. For DSI panels the same idea
55  * is called "manual mode".
56  *
57  * The implementation uses the hardware-based PSR support which automatically
58  * enters/exits self-refresh mode. The hardware takes care of sending the
59  * required DP aux message and could even retrain the link (that part isn't
60  * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
68  *
69  * DC3CO (DC3 clock off)
70  *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep
 * sleep entry/exit allows the HW to enter a low-power state even when page
 * flipping periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 gets out of deep sleep state (if it was in
 * it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
 * after 6 frames. If no other flip occurs and the work runs, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep again; another flip
 * restarts the sequence.
 * Frontbuffer modifications do not trigger DC3CO activation on purpose as
 * it would bring a lot of complexity and most modern systems will only use
 * page flips.
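 *
 * As an illustrative example with an assumed 60 Hz panel: the frame time is
 * ~16.7 ms, so tgl_dc3co_disable_work runs roughly 6 * 16.7 ms = ~100 ms
 * after the last flip, and only if no flip arrives within that window does
 * the driver drop from DC3CO back to PSR2 deep sleep.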
85  */
86 
87 static bool psr_global_enabled(struct intel_dp *intel_dp)
88 {
89 	struct intel_connector *connector = intel_dp->attached_connector;
90 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
91 
92 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
93 	case I915_PSR_DEBUG_DEFAULT:
94 		if (i915->params.enable_psr == -1)
95 			return connector->panel.vbt.psr.enable;
96 		return i915->params.enable_psr;
97 	case I915_PSR_DEBUG_DISABLE:
98 		return false;
99 	default:
100 		return true;
101 	}
102 }
103 
104 static bool psr2_global_enabled(struct intel_dp *intel_dp)
105 {
106 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
107 
108 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
109 	case I915_PSR_DEBUG_DISABLE:
110 	case I915_PSR_DEBUG_FORCE_PSR1:
111 		return false;
112 	default:
113 		if (i915->params.enable_psr == 1)
114 			return false;
115 		return true;
116 	}
117 }
118 
119 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
120 {
121 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
122 
123 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
124 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
125 }
126 
127 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
128 {
129 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
130 
131 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
132 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
133 }
134 
135 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
136 {
137 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
138 
139 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
140 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
141 }
142 
143 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
144 {
145 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
146 
147 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
148 		EDP_PSR_MASK(intel_dp->psr.transcoder);
149 }
150 
151 static void psr_irq_control(struct intel_dp *intel_dp)
152 {
153 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
154 	i915_reg_t imr_reg;
155 	u32 mask;
156 
157 	if (DISPLAY_VER(dev_priv) >= 12)
158 		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
159 	else
160 		imr_reg = EDP_PSR_IMR;
161 
162 	mask = psr_irq_psr_error_bit_get(intel_dp);
163 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
164 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
165 			psr_irq_pre_entry_bit_get(intel_dp);
166 
167 	intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask);
168 }
169 
170 static void psr_event_print(struct drm_i915_private *i915,
171 			    u32 val, bool psr2_enabled)
172 {
173 	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
174 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
175 		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
176 	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
177 		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
178 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
179 		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
180 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
181 		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
182 	if (val & PSR_EVENT_GRAPHICS_RESET)
183 		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
184 	if (val & PSR_EVENT_PCH_INTERRUPT)
185 		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
186 	if (val & PSR_EVENT_MEMORY_UP)
187 		drm_dbg_kms(&i915->drm, "\tMemory up\n");
188 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
189 		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
190 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
191 		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
192 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
193 		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
194 	if (val & PSR_EVENT_REGISTER_UPDATE)
195 		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
196 	if (val & PSR_EVENT_HDCP_ENABLE)
197 		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
198 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
199 		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
200 	if (val & PSR_EVENT_VBI_ENABLE)
201 		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
202 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
203 		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
204 	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
205 		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
206 }
207 
208 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
209 {
210 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
211 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	ktime_t time_ns = ktime_get();
213 	i915_reg_t imr_reg;
214 
215 	if (DISPLAY_VER(dev_priv) >= 12)
216 		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
217 	else
218 		imr_reg = EDP_PSR_IMR;
219 
220 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
221 		intel_dp->psr.last_entry_attempt = time_ns;
222 		drm_dbg_kms(&dev_priv->drm,
223 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
224 			    transcoder_name(cpu_transcoder));
225 	}
226 
227 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
228 		intel_dp->psr.last_exit = time_ns;
229 		drm_dbg_kms(&dev_priv->drm,
230 			    "[transcoder %s] PSR exit completed\n",
231 			    transcoder_name(cpu_transcoder));
232 
233 		if (DISPLAY_VER(dev_priv) >= 9) {
234 			u32 val = intel_de_read(dev_priv,
235 						PSR_EVENT(cpu_transcoder));
236 			bool psr2_enabled = intel_dp->psr.psr2_enabled;
237 
238 			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
239 				       val);
240 			psr_event_print(dev_priv, val, psr2_enabled);
241 		}
242 	}
243 
244 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
245 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
246 			 transcoder_name(cpu_transcoder));
247 
248 		intel_dp->psr.irq_aux_error = true;
249 
250 		/*
251 		 * If this interruption is not masked it will keep
252 		 * interrupting so fast that it prevents the scheduled
253 		 * work to run.
254 		 * Also after a PSR error, we don't want to arm PSR
255 		 * again so we don't care about unmask the interruption
256 		 * or unset irq_aux_error.
257 		 */
258 		intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
259 
260 		schedule_work(&intel_dp->psr.work);
261 	}
262 }
263 
264 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
265 {
266 	u8 alpm_caps = 0;
267 
268 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
269 			      &alpm_caps) != 1)
270 		return false;
271 	return alpm_caps & DP_ALPM_CAP;
272 }
273 
274 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
275 {
276 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
277 	u8 val = 8; /* assume the worst if we can't read the value */
278 
279 	if (drm_dp_dpcd_readb(&intel_dp->aux,
280 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
281 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
282 	else
283 		drm_dbg_kms(&i915->drm,
284 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
285 	return val;
286 }
287 
288 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
289 {
290 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
291 	ssize_t r;
292 	u16 w;
293 	u8 y;
294 
	/* If the sink doesn't have specific granularity requirements, set legacy ones */
296 	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
297 		/* As PSR2 HW sends full lines, we do not care about x granularity */
298 		w = 4;
299 		y = 4;
300 		goto exit;
301 	}
302 
303 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
304 	if (r != 2)
305 		drm_dbg_kms(&i915->drm,
306 			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
307 	/*
308 	 * Spec says that if the value read is 0 the default granularity should
309 	 * be used instead.
310 	 */
311 	if (r != 2 || w == 0)
312 		w = 4;
313 
314 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
315 	if (r != 1) {
316 		drm_dbg_kms(&i915->drm,
317 			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
318 		y = 4;
319 	}
320 	if (y == 0)
321 		y = 1;
322 
323 exit:
324 	intel_dp->psr.su_w_granularity = w;
325 	intel_dp->psr.su_y_granularity = y;
326 }
327 
328 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
329 {
330 	struct drm_i915_private *dev_priv =
331 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
332 
333 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
334 			 sizeof(intel_dp->psr_dpcd));
335 
336 	if (!intel_dp->psr_dpcd[0])
337 		return;
338 	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
339 		    intel_dp->psr_dpcd[0]);
340 
341 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
342 		drm_dbg_kms(&dev_priv->drm,
343 			    "PSR support not currently available for this panel\n");
344 		return;
345 	}
346 
347 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
348 		drm_dbg_kms(&dev_priv->drm,
349 			    "Panel lacks power state control, PSR cannot be enabled\n");
350 		return;
351 	}
352 
353 	intel_dp->psr.sink_support = true;
354 	intel_dp->psr.sink_sync_latency =
355 		intel_dp_get_sink_sync_latency(intel_dp);
356 
357 	if (DISPLAY_VER(dev_priv) >= 9 &&
358 	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
359 		bool y_req = intel_dp->psr_dpcd[1] &
360 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
361 		bool alpm = intel_dp_get_alpm_status(intel_dp);
362 
363 		/*
364 		 * All panels that supports PSR version 03h (PSR2 +
365 		 * Y-coordinate) can handle Y-coordinates in VSC but we are
366 		 * only sure that it is going to be used when required by the
367 		 * panel. This way panel is capable to do selective update
368 		 * without a aux frame sync.
369 		 *
370 		 * To support PSR version 02h and PSR version 03h without
371 		 * Y-coordinate requirement panels we would need to enable
372 		 * GTC first.
373 		 */
374 		intel_dp->psr.sink_psr2_support = y_req && alpm;
375 		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
376 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
377 
378 		if (intel_dp->psr.sink_psr2_support) {
379 			intel_dp->psr.colorimetry_support =
380 				intel_dp_get_colorimetry_status(intel_dp);
381 			intel_dp_get_su_granularity(intel_dp);
382 		}
383 	}
384 }
385 
386 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
387 {
388 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
389 	u8 dpcd_val = DP_PSR_ENABLE;
390 
391 	/* Enable ALPM at sink for psr2 */
392 	if (intel_dp->psr.psr2_enabled) {
393 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
394 				   DP_ALPM_ENABLE |
395 				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
396 
397 		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
398 	} else {
399 		if (intel_dp->psr.link_standby)
400 			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
401 
402 		if (DISPLAY_VER(dev_priv) >= 8)
403 			dpcd_val |= DP_PSR_CRC_VERIFICATION;
404 	}
405 
406 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
407 		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
408 
409 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
410 
411 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
412 }
413 
414 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
415 {
416 	struct intel_connector *connector = intel_dp->attached_connector;
417 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
418 	u32 val = 0;
419 
420 	if (DISPLAY_VER(dev_priv) >= 11)
421 		val |= EDP_PSR_TP4_TIME_0US;
422 
423 	if (dev_priv->params.psr_safest_params) {
424 		val |= EDP_PSR_TP1_TIME_2500us;
425 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
426 		goto check_tp3_sel;
427 	}
428 
429 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
430 		val |= EDP_PSR_TP1_TIME_0us;
431 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
432 		val |= EDP_PSR_TP1_TIME_100us;
433 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
434 		val |= EDP_PSR_TP1_TIME_500us;
435 	else
436 		val |= EDP_PSR_TP1_TIME_2500us;
437 
438 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
439 		val |= EDP_PSR_TP2_TP3_TIME_0us;
440 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
441 		val |= EDP_PSR_TP2_TP3_TIME_100us;
442 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
443 		val |= EDP_PSR_TP2_TP3_TIME_500us;
444 	else
445 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
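
	/*
	 * e.g. an assumed VBT tp2/tp3 wakeup time of 200 us falls into the
	 * "<= 500" bucket above and selects the 500 us encoding.
	 */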
446 
447 check_tp3_sel:
448 	if (intel_dp_source_supports_tps3(dev_priv) &&
449 	    drm_dp_tps3_supported(intel_dp->dpcd))
450 		val |= EDP_PSR_TP1_TP3_SEL;
451 	else
452 		val |= EDP_PSR_TP1_TP2_SEL;
453 
454 	return val;
455 }
456 
457 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
458 {
459 	struct intel_connector *connector = intel_dp->attached_connector;
460 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
461 	int idle_frames;
462 
463 	/* Let's use 6 as the minimum to cover all known cases including the
464 	 * off-by-one issue that HW has in some cases.
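	 *
	 * e.g. with an assumed VBT value of 2 and a sink sync latency of 8,
	 * this yields max(6, 2) = 6 and then max(6, 8 + 1) = 9 idle frames.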
465 	 */
466 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
467 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
468 
469 	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
470 		idle_frames = 0xf;
471 
472 	return idle_frames;
473 }
474 
475 static void hsw_activate_psr1(struct intel_dp *intel_dp)
476 {
477 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
478 	u32 max_sleep_time = 0x1f;
479 	u32 val = EDP_PSR_ENABLE;
480 
481 	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
482 
483 	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
484 	if (IS_HASWELL(dev_priv))
485 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
486 
487 	if (intel_dp->psr.link_standby)
488 		val |= EDP_PSR_LINK_STANDBY;
489 
490 	val |= intel_psr1_get_tp_time(intel_dp);
491 
492 	if (DISPLAY_VER(dev_priv) >= 8)
493 		val |= EDP_PSR_CRC_ENABLE;
494 
495 	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
496 		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
497 	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
498 }
499 
500 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
501 {
502 	struct intel_connector *connector = intel_dp->attached_connector;
503 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
504 	u32 val = 0;
505 
506 	if (dev_priv->params.psr_safest_params)
507 		return EDP_PSR2_TP2_TIME_2500us;
508 
509 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
510 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
511 		val |= EDP_PSR2_TP2_TIME_50us;
512 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
513 		val |= EDP_PSR2_TP2_TIME_100us;
514 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
515 		val |= EDP_PSR2_TP2_TIME_500us;
516 	else
517 		val |= EDP_PSR2_TP2_TIME_2500us;
518 
519 	return val;
520 }
521 
522 static void hsw_activate_psr2(struct intel_dp *intel_dp)
523 {
524 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
525 	u32 val = EDP_PSR2_ENABLE;
526 
527 	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
528 
529 	if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
530 		val |= EDP_SU_TRACK_ENABLE;
531 
532 	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
533 		val |= EDP_Y_COORDINATE_ENABLE;
534 
535 	val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
536 	val |= intel_psr2_get_tp_time(intel_dp);
537 
538 	if (DISPLAY_VER(dev_priv) >= 12) {
539 		if (intel_dp->psr.io_wake_lines < 9 &&
540 		    intel_dp->psr.fast_wake_lines < 9)
541 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
542 		else
543 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
544 	}
545 
546 	/* Wa_22012278275:adl-p */
547 	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
548 		static const u8 map[] = {
549 			2, /* 5 lines */
550 			1, /* 6 lines */
551 			0, /* 7 lines */
552 			3, /* 8 lines */
553 			6, /* 9 lines */
554 			5, /* 10 lines */
555 			4, /* 11 lines */
556 			7, /* 12 lines */
557 		};
558 		/*
559 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
560 		 * comments bellow for more information
561 		 */
562 		u32 tmp;
563 
564 		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
565 		tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
566 		val |= tmp;
567 
568 		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
569 		tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
570 		val |= tmp;
571 	} else if (DISPLAY_VER(dev_priv) >= 12) {
572 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
573 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
574 	} else if (DISPLAY_VER(dev_priv) >= 9) {
575 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
576 		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
577 	}
578 
579 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
580 		val |= EDP_PSR2_SU_SDP_SCANLINE;
581 
582 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
583 		u32 tmp;
584 
585 		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
586 		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
587 	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
588 		intel_de_write(dev_priv,
589 			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
590 	}
591 
592 	/*
593 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
594 	 * recommending keep this bit unset while PSR2 is enabled.
595 	 */
596 	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
597 
598 	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
599 }
600 
601 static bool
602 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
603 {
604 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
605 		return trans == TRANSCODER_A || trans == TRANSCODER_B;
606 	else if (DISPLAY_VER(dev_priv) >= 12)
607 		return trans == TRANSCODER_A;
608 	else
609 		return trans == TRANSCODER_EDP;
610 }
611 
612 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
613 {
614 	if (!cstate || !cstate->hw.active)
615 		return 0;
616 
617 	return DIV_ROUND_UP(1000 * 1000,
618 			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
619 }
620 
621 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
622 				     u32 idle_frames)
623 {
624 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
625 
	idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
627 	intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder),
628 		     EDP_PSR2_IDLE_FRAME_MASK, idle_frames);
629 }
630 
631 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
632 {
633 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
634 
635 	psr2_program_idle_frames(intel_dp, 0);
636 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
637 }
638 
639 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
640 {
641 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
642 
643 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
644 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
645 }
646 
647 static void tgl_dc3co_disable_work(struct work_struct *work)
648 {
649 	struct intel_dp *intel_dp =
650 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
651 
652 	mutex_lock(&intel_dp->psr.lock);
653 	/* If delayed work is pending, it is not idle */
654 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
655 		goto unlock;
656 
657 	tgl_psr2_disable_dc3co(intel_dp);
658 unlock:
659 	mutex_unlock(&intel_dp->psr.lock);
660 }
661 
662 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
663 {
664 	if (!intel_dp->psr.dc3co_exitline)
665 		return;
666 
667 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit, disallow DC3CO */
669 	tgl_psr2_disable_dc3co(intel_dp);
670 }
671 
672 static bool
673 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
674 			      struct intel_crtc_state *crtc_state)
675 {
676 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
677 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
678 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
679 	enum port port = dig_port->base.port;
680 
681 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
682 		return pipe <= PIPE_B && port <= PORT_B;
683 	else
684 		return pipe == PIPE_A && port == PORT_A;
685 }
686 
687 static void
688 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
689 				  struct intel_crtc_state *crtc_state)
690 {
691 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
692 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
693 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
694 	u32 exit_scanlines;
695 
696 	/*
697 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
698 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
699 	 * is applied. B.Specs:49196
700 	 */
701 	return;
702 
703 	/*
704 	 * DMC's DC3CO exit mechanism has an issue with Selective Fecth
705 	 * TODO: when the issue is addressed, this restriction should be removed.
706 	 */
707 	if (crtc_state->enable_psr2_sel_fetch)
708 		return;
709 
710 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
711 		return;
712 
713 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
714 		return;
715 
716 	/* Wa_16011303918:adl-p */
717 	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
718 		return;
719 
720 	/*
721 	 * DC3CO Exit time 200us B.Spec 49196
722 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
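	 *
	 * Illustrative arithmetic with an assumed 1080p60 mode (~15 us per
	 * line): exit_scanlines = ROUNDUP(200 / 15) + 1 = 15, so
	 * dc3co_exitline ends up 15 lines before the end of vertical active.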
723 	 */
724 	exit_scanlines =
725 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
726 
727 	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
728 		return;
729 
730 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
731 }
732 
733 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
734 					      struct intel_crtc_state *crtc_state)
735 {
736 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
737 
738 	if (!dev_priv->params.enable_psr2_sel_fetch &&
739 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
740 		drm_dbg_kms(&dev_priv->drm,
741 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
742 		return false;
743 	}
744 
745 	if (crtc_state->uapi.async_flip) {
746 		drm_dbg_kms(&dev_priv->drm,
747 			    "PSR2 sel fetch not enabled, async flip enabled\n");
748 		return false;
749 	}
750 
751 	return crtc_state->enable_psr2_sel_fetch = true;
752 }
753 
754 static bool psr2_granularity_check(struct intel_dp *intel_dp,
755 				   struct intel_crtc_state *crtc_state)
756 {
757 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
758 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
759 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
760 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
761 	u16 y_granularity = 0;
762 
	/* PSR2 HW only sends full lines, so we only need to validate the width */
764 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
765 		return false;
766 
767 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
768 		return false;
769 
770 	/* HW tracking is only aligned to 4 lines */
771 	if (!crtc_state->enable_psr2_sel_fetch)
772 		return intel_dp->psr.su_y_granularity == 4;
773 
774 	/*
775 	 * adl_p and mtl platforms have 1 line granularity.
776 	 * For other platforms with SW tracking we can adjust the y coordinates
777 	 * to match sink requirement if multiple of 4.
778 	 */
779 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
780 		y_granularity = intel_dp->psr.su_y_granularity;
781 	else if (intel_dp->psr.su_y_granularity <= 2)
782 		y_granularity = 4;
783 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
784 		y_granularity = intel_dp->psr.su_y_granularity;
785 
786 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
787 		return false;
788 
789 	if (crtc_state->dsc.compression_enable &&
790 	    vdsc_cfg->slice_height % y_granularity)
791 		return false;
792 
793 	crtc_state->su_y_granularity = y_granularity;
794 	return true;
795 }
796 
797 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
798 							struct intel_crtc_state *crtc_state)
799 {
800 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
801 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
802 	u32 hblank_total, hblank_ns, req_ns;
803 
804 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
805 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
806 
807 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
808 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
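	/*
	 * Worked example with assumed link parameters: 4 lanes at HBR2
	 * (port_clock of 540000, i.e. a 540 MHz symbol clock) gives
	 * req_ns = ((60 / 4) + 11) * 1000 / 540 = ~48 ns, while an assumed
	 * hblank_total of 160 at a crtc_clock of 148500 gives
	 * hblank_ns = ~1077, comfortably clearing the 100 ns margin below.
	 */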
809 
810 	if ((hblank_ns - req_ns) > 100)
811 		return true;
812 
	/* Not supported on display ver. <= 13 / Wa_22012279113:adl-p */
814 	if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
815 		return false;
816 
817 	crtc_state->req_psr2_sdp_prior_scanline = true;
818 	return true;
819 }
820 
821 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
822 				     struct intel_crtc_state *crtc_state)
823 {
824 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
825 	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
826 	u8 max_wake_lines;
827 
828 	if (DISPLAY_VER(i915) >= 12) {
829 		io_wake_time = 42;
830 		/*
831 		 * According to Bspec it's 42us, but based on testing
832 		 * it is not enough -> use 45 us.
833 		 */
834 		fast_wake_time = 45;
835 		max_wake_lines = 12;
836 	} else {
837 		io_wake_time = 50;
838 		fast_wake_time = 32;
839 		max_wake_lines = 8;
840 	}
841 
842 	io_wake_lines = intel_usecs_to_scanlines(
843 		&crtc_state->uapi.adjusted_mode, io_wake_time);
844 	fast_wake_lines = intel_usecs_to_scanlines(
845 		&crtc_state->uapi.adjusted_mode, fast_wake_time);
846 
847 	if (io_wake_lines > max_wake_lines ||
848 	    fast_wake_lines > max_wake_lines)
849 		return false;
850 
851 	if (i915->params.psr_safest_params)
852 		io_wake_lines = fast_wake_lines = max_wake_lines;
853 
	/* According to Bspec the lower limit should be set to 7 lines. */
855 	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
856 	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
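	/*
	 * e.g. with an assumed 1080p60 mode (~15 us per line) the 42/45 us
	 * wake times convert to 3 scanlines each, which the max() above
	 * raises to the Bspec minimum of 7 lines.
	 */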
857 
858 	return true;
859 }
860 
861 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
862 				    struct intel_crtc_state *crtc_state)
863 {
864 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
865 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
866 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
867 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
868 
869 	if (!intel_dp->psr.sink_psr2_support)
870 		return false;
871 
	/* JSL and EHL only support eDP 1.3 */
873 	if (IS_JSL_EHL(dev_priv)) {
874 		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
875 		return false;
876 	}
877 
878 	/* Wa_16011181250 */
879 	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
880 	    IS_DG2(dev_priv)) {
881 		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
882 		return false;
883 	}
884 
885 	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
886 		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
887 		return false;
888 	}
889 
890 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
891 		drm_dbg_kms(&dev_priv->drm,
892 			    "PSR2 not supported in transcoder %s\n",
893 			    transcoder_name(crtc_state->cpu_transcoder));
894 		return false;
895 	}
896 
897 	if (!psr2_global_enabled(intel_dp)) {
898 		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
899 		return false;
900 	}
901 
902 	/*
903 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
904 	 * resolution requires DSC to be enabled, priority is given to DSC
905 	 * over PSR2.
906 	 */
907 	if (crtc_state->dsc.compression_enable &&
908 	    (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
909 		drm_dbg_kms(&dev_priv->drm,
910 			    "PSR2 cannot be enabled since DSC is enabled\n");
911 		return false;
912 	}
913 
914 	if (crtc_state->crc_enabled) {
915 		drm_dbg_kms(&dev_priv->drm,
916 			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
917 		return false;
918 	}
919 
920 	if (DISPLAY_VER(dev_priv) >= 12) {
921 		psr_max_h = 5120;
922 		psr_max_v = 3200;
923 		max_bpp = 30;
924 	} else if (DISPLAY_VER(dev_priv) >= 10) {
925 		psr_max_h = 4096;
926 		psr_max_v = 2304;
927 		max_bpp = 24;
928 	} else if (DISPLAY_VER(dev_priv) == 9) {
929 		psr_max_h = 3640;
930 		psr_max_v = 2304;
931 		max_bpp = 24;
932 	}
933 
934 	if (crtc_state->pipe_bpp > max_bpp) {
935 		drm_dbg_kms(&dev_priv->drm,
936 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
937 			    crtc_state->pipe_bpp, max_bpp);
938 		return false;
939 	}
940 
941 	/* Wa_16011303918:adl-p */
942 	if (crtc_state->vrr.enable &&
943 	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
944 		drm_dbg_kms(&dev_priv->drm,
945 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
946 		return false;
947 	}
948 
949 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
950 		drm_dbg_kms(&dev_priv->drm,
951 			    "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
952 		return false;
953 	}
954 
955 	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
956 		drm_dbg_kms(&dev_priv->drm,
957 			    "PSR2 not enabled, Unable to use long enough wake times\n");
958 		return false;
959 	}
960 
961 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
962 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
963 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
964 			drm_dbg_kms(&dev_priv->drm,
965 				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
966 			return false;
967 		}
968 	}
969 
970 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
971 		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
972 		goto unsupported;
973 	}
974 
975 	if (!crtc_state->enable_psr2_sel_fetch &&
976 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
977 		drm_dbg_kms(&dev_priv->drm,
978 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
979 			    crtc_hdisplay, crtc_vdisplay,
980 			    psr_max_h, psr_max_v);
981 		goto unsupported;
982 	}
983 
984 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
985 	return true;
986 
987 unsupported:
988 	crtc_state->enable_psr2_sel_fetch = false;
989 	return false;
990 }
991 
992 void intel_psr_compute_config(struct intel_dp *intel_dp,
993 			      struct intel_crtc_state *crtc_state,
994 			      struct drm_connector_state *conn_state)
995 {
996 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
997 	const struct drm_display_mode *adjusted_mode =
998 		&crtc_state->hw.adjusted_mode;
999 	int psr_setup_time;
1000 
1001 	/*
1002 	 * Current PSR panels don't work reliably with VRR enabled
1003 	 * So if VRR is enabled, do not enable PSR.
1004 	 */
1005 	if (crtc_state->vrr.enable)
1006 		return;
1007 
1008 	if (!CAN_PSR(intel_dp))
1009 		return;
1010 
1011 	if (!psr_global_enabled(intel_dp)) {
1012 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1013 		return;
1014 	}
1015 
1016 	if (intel_dp->psr.sink_not_reliable) {
1017 		drm_dbg_kms(&dev_priv->drm,
1018 			    "PSR sink implementation is not reliable\n");
1019 		return;
1020 	}
1021 
1022 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1023 		drm_dbg_kms(&dev_priv->drm,
1024 			    "PSR condition failed: Interlaced mode enabled\n");
1025 		return;
1026 	}
1027 
1028 	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1029 	if (psr_setup_time < 0) {
1030 		drm_dbg_kms(&dev_priv->drm,
1031 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1032 			    intel_dp->psr_dpcd[1]);
1033 		return;
1034 	}
1035 
1036 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1037 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1038 		drm_dbg_kms(&dev_priv->drm,
1039 			    "PSR condition failed: PSR setup time (%d us) too long\n",
1040 			    psr_setup_time);
1041 		return;
1042 	}
1043 
1044 	crtc_state->has_psr = true;
1045 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1046 
1047 	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1048 	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1049 				     &crtc_state->psr_vsc);
1050 }
1051 
1052 void intel_psr_get_config(struct intel_encoder *encoder,
1053 			  struct intel_crtc_state *pipe_config)
1054 {
1055 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1056 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1057 	struct intel_dp *intel_dp;
1058 	u32 val;
1059 
1060 	if (!dig_port)
1061 		return;
1062 
1063 	intel_dp = &dig_port->dp;
1064 	if (!CAN_PSR(intel_dp))
1065 		return;
1066 
1067 	mutex_lock(&intel_dp->psr.lock);
1068 	if (!intel_dp->psr.enabled)
1069 		goto unlock;
1070 
1071 	/*
1072 	 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
1073 	 * enabled/disabled because of frontbuffer tracking and others.
1074 	 */
1075 	pipe_config->has_psr = true;
1076 	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1077 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1078 
1079 	if (!intel_dp->psr.psr2_enabled)
1080 		goto unlock;
1081 
1082 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1083 		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
1084 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1085 			pipe_config->enable_psr2_sel_fetch = true;
1086 	}
1087 
1088 	if (DISPLAY_VER(dev_priv) >= 12) {
1089 		val = intel_de_read(dev_priv, TRANS_EXITLINE(intel_dp->psr.transcoder));
1090 		val &= EXITLINE_MASK;
1091 		pipe_config->dc3co_exitline = val;
1092 	}
1093 unlock:
1094 	mutex_unlock(&intel_dp->psr.lock);
1095 }
1096 
1097 static void intel_psr_activate(struct intel_dp *intel_dp)
1098 {
1099 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1100 	enum transcoder transcoder = intel_dp->psr.transcoder;
1101 
1102 	if (transcoder_has_psr2(dev_priv, transcoder))
1103 		drm_WARN_ON(&dev_priv->drm,
1104 			    intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
1105 
1106 	drm_WARN_ON(&dev_priv->drm,
1107 		    intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
1108 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1109 	lockdep_assert_held(&intel_dp->psr.lock);
1110 
	/* PSR1 and PSR2 are mutually exclusive. */
1112 	if (intel_dp->psr.psr2_enabled)
1113 		hsw_activate_psr2(intel_dp);
1114 	else
1115 		hsw_activate_psr1(intel_dp);
1116 
1117 	intel_dp->psr.active = true;
1118 }
1119 
1120 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1121 {
1122 	switch (intel_dp->psr.pipe) {
1123 	case PIPE_A:
1124 		return LATENCY_REPORTING_REMOVED_PIPE_A;
1125 	case PIPE_B:
1126 		return LATENCY_REPORTING_REMOVED_PIPE_B;
1127 	case PIPE_C:
1128 		return LATENCY_REPORTING_REMOVED_PIPE_C;
1129 	case PIPE_D:
1130 		return LATENCY_REPORTING_REMOVED_PIPE_D;
1131 	default:
1132 		MISSING_CASE(intel_dp->psr.pipe);
1133 		return 0;
1134 	}
1135 }
1136 
1137 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1138 				    const struct intel_crtc_state *crtc_state)
1139 {
1140 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1141 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1142 	u32 mask;
1143 
1144 	/*
1145 	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
1146 	 * mask LPSP to avoid dependency on other drivers that might block
1147 	 * runtime_pm besides preventing  other hw tracking issues now we
1148 	 * can rely on frontbuffer tracking.
1149 	 */
1150 	mask = EDP_PSR_DEBUG_MASK_MEMUP |
1151 	       EDP_PSR_DEBUG_MASK_HPD |
1152 	       EDP_PSR_DEBUG_MASK_LPSP |
1153 	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1154 
1155 	if (DISPLAY_VER(dev_priv) < 11)
1156 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1157 
1158 	intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
1159 		       mask);
1160 
1161 	psr_irq_control(intel_dp);
1162 
1163 	/*
1164 	 * TODO: if future platforms supports DC3CO in more than one
1165 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1166 	 */
1167 	if (intel_dp->psr.dc3co_exitline)
1168 		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1169 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1170 
1171 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1172 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1173 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1174 			     IGNORE_PSR2_HW_TRACKING : 0);
1175 
1176 	/*
1177 	 * Wa_16013835468
1178 	 * Wa_14015648006
1179 	 */
1180 	if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1181 	    IS_DISPLAY_VER(dev_priv, 12, 13)) {
1182 		if (crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1183 		    crtc_state->hw.adjusted_mode.crtc_vdisplay)
1184 			intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
1185 				     wa_16013835468_bit_get(intel_dp));
1186 	}
1187 
1188 	if (intel_dp->psr.psr2_enabled) {
1189 		if (DISPLAY_VER(dev_priv) == 9)
1190 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1191 				     PSR2_VSC_ENABLE_PROG_HEADER |
1192 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1193 
1194 		/*
1195 		 * Wa_16014451276:adlp,mtl[a0,b0]
1196 		 * All supported adlp panels have 1-based X granularity, this may
1197 		 * cause issues if non-supported panels are used.
1198 		 */
1199 		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1200 			intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
1201 				     ADLP_1_BASED_X_GRANULARITY);
1202 		else if (IS_ALDERLAKE_P(dev_priv))
1203 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1204 				     ADLP_1_BASED_X_GRANULARITY);
1205 
1206 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1207 		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1208 			intel_de_rmw(dev_priv,
1209 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1210 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1211 		else if (IS_ALDERLAKE_P(dev_priv))
1212 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1213 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1214 	}
1215 }
1216 
1217 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1218 {
1219 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1220 	u32 val;
1221 
1222 	/*
1223 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1224 	 * will still keep the error set even after the reset done in the
1225 	 * irq_preinstall and irq_uninstall hooks.
1226 	 * And enabling in this situation cause the screen to freeze in the
1227 	 * first time that PSR HW tries to activate so lets keep PSR disabled
1228 	 * to avoid any rendering problems.
1229 	 */
1230 	if (DISPLAY_VER(dev_priv) >= 12)
1231 		val = intel_de_read(dev_priv,
1232 				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
1233 	else
1234 		val = intel_de_read(dev_priv, EDP_PSR_IIR);
1235 	val &= psr_irq_psr_error_bit_get(intel_dp);
1236 	if (val) {
1237 		intel_dp->psr.sink_not_reliable = true;
1238 		drm_dbg_kms(&dev_priv->drm,
1239 			    "PSR interruption error set, not enabling PSR\n");
1240 		return false;
1241 	}
1242 
1243 	return true;
1244 }
1245 
1246 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1247 				    const struct intel_crtc_state *crtc_state)
1248 {
1249 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1250 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1251 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1252 	struct intel_encoder *encoder = &dig_port->base;
1253 	u32 val;
1254 
1255 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1256 
1257 	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1258 	intel_dp->psr.busy_frontbuffer_bits = 0;
1259 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1260 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1261 	/* DC5/DC6 requires at least 6 idle frames */
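	/* e.g. ~100 ms worth of jiffies for an assumed 60 Hz mode */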
1262 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1263 	intel_dp->psr.dc3co_exit_delay = val;
1264 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1265 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1266 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1267 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1268 		crtc_state->req_psr2_sdp_prior_scanline;
1269 
1270 	if (!psr_interrupt_error_check(intel_dp))
1271 		return;
1272 
1273 	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1274 		    intel_dp->psr.psr2_enabled ? "2" : "1");
1275 	intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1276 	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1277 	intel_psr_enable_sink(intel_dp);
1278 	intel_psr_enable_source(intel_dp, crtc_state);
1279 	intel_dp->psr.enabled = true;
1280 	intel_dp->psr.paused = false;
1281 
1282 	intel_psr_activate(intel_dp);
1283 }
1284 
1285 static void intel_psr_exit(struct intel_dp *intel_dp)
1286 {
1287 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1288 	u32 val;
1289 
1290 	if (!intel_dp->psr.active) {
1291 		if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
1292 			val = intel_de_read(dev_priv,
1293 					    EDP_PSR2_CTL(intel_dp->psr.transcoder));
1294 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1295 		}
1296 
1297 		val = intel_de_read(dev_priv,
1298 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
1299 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1300 
1301 		return;
1302 	}
1303 
1304 	if (intel_dp->psr.psr2_enabled) {
1305 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1306 		val = intel_de_read(dev_priv,
1307 				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
1308 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1309 		val &= ~EDP_PSR2_ENABLE;
1310 		intel_de_write(dev_priv,
1311 			       EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
1312 	} else {
1313 		val = intel_de_read(dev_priv,
1314 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
1315 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1316 		val &= ~EDP_PSR_ENABLE;
1317 		intel_de_write(dev_priv,
1318 			       EDP_PSR_CTL(intel_dp->psr.transcoder), val);
1319 	}
1320 	intel_dp->psr.active = false;
1321 }
1322 
1323 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1324 {
1325 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1326 	i915_reg_t psr_status;
1327 	u32 psr_status_mask;
1328 
1329 	if (intel_dp->psr.psr2_enabled) {
1330 		psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1331 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1332 	} else {
1333 		psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1334 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1335 	}
1336 
1337 	/* Wait till PSR is idle */
1338 	if (intel_de_wait_for_clear(dev_priv, psr_status,
1339 				    psr_status_mask, 2000))
1340 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1341 }
1342 
1343 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1344 {
1345 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1346 	enum phy phy = intel_port_to_phy(dev_priv,
1347 					 dp_to_dig_port(intel_dp)->base.port);
1348 
1349 	lockdep_assert_held(&intel_dp->psr.lock);
1350 
1351 	if (!intel_dp->psr.enabled)
1352 		return;
1353 
1354 	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1355 		    intel_dp->psr.psr2_enabled ? "2" : "1");
1356 
1357 	intel_psr_exit(intel_dp);
1358 	intel_psr_wait_exit_locked(intel_dp);
1359 
1360 	/*
1361 	 * Wa_16013835468
1362 	 * Wa_14015648006
1363 	 */
1364 	if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1365 	    IS_DISPLAY_VER(dev_priv, 12, 13))
1366 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1367 			     wa_16013835468_bit_get(intel_dp), 0);
1368 
1369 	if (intel_dp->psr.psr2_enabled) {
1370 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1371 		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1372 			intel_de_rmw(dev_priv,
1373 				     MTL_CLKGATE_DIS_TRANS(intel_dp->psr.transcoder),
1374 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1375 		else if (IS_ALDERLAKE_P(dev_priv))
1376 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1377 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1378 	}
1379 
1380 	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1381 
1382 	/* Disable PSR on Sink */
1383 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1384 
1385 	if (intel_dp->psr.psr2_enabled)
1386 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1387 
1388 	intel_dp->psr.enabled = false;
1389 	intel_dp->psr.psr2_enabled = false;
1390 	intel_dp->psr.psr2_sel_fetch_enabled = false;
1391 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1392 }
1393 
1394 /**
1395  * intel_psr_disable - Disable PSR
1396  * @intel_dp: Intel DP
1397  * @old_crtc_state: old CRTC state
1398  *
 * This function needs to be called before disabling the pipe.
1400  */
1401 void intel_psr_disable(struct intel_dp *intel_dp,
1402 		       const struct intel_crtc_state *old_crtc_state)
1403 {
1404 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1405 
1406 	if (!old_crtc_state->has_psr)
1407 		return;
1408 
1409 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1410 		return;
1411 
1412 	mutex_lock(&intel_dp->psr.lock);
1413 
1414 	intel_psr_disable_locked(intel_dp);
1415 
1416 	mutex_unlock(&intel_dp->psr.lock);
1417 	cancel_work_sync(&intel_dp->psr.work);
1418 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1419 }
1420 
1421 /**
1422  * intel_psr_pause - Pause PSR
1423  * @intel_dp: Intel DP
1424  *
 * This function needs to be called after enabling PSR.
1426  */
1427 void intel_psr_pause(struct intel_dp *intel_dp)
1428 {
1429 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1430 	struct intel_psr *psr = &intel_dp->psr;
1431 
1432 	if (!CAN_PSR(intel_dp))
1433 		return;
1434 
1435 	mutex_lock(&psr->lock);
1436 
1437 	if (!psr->enabled) {
1438 		mutex_unlock(&psr->lock);
1439 		return;
1440 	}
1441 
1442 	/* If we ever hit this, we will need to add refcount to pause/resume */
1443 	drm_WARN_ON(&dev_priv->drm, psr->paused);
1444 
1445 	intel_psr_exit(intel_dp);
1446 	intel_psr_wait_exit_locked(intel_dp);
1447 	psr->paused = true;
1448 
1449 	mutex_unlock(&psr->lock);
1450 
1451 	cancel_work_sync(&psr->work);
1452 	cancel_delayed_work_sync(&psr->dc3co_work);
1453 }
1454 
1455 /**
1456  * intel_psr_resume - Resume PSR
1457  * @intel_dp: Intel DP
1458  *
 * This function needs to be called after pausing PSR.
1460  */
1461 void intel_psr_resume(struct intel_dp *intel_dp)
1462 {
1463 	struct intel_psr *psr = &intel_dp->psr;
1464 
1465 	if (!CAN_PSR(intel_dp))
1466 		return;
1467 
1468 	mutex_lock(&psr->lock);
1469 
1470 	if (!psr->paused)
1471 		goto unlock;
1472 
1473 	psr->paused = false;
1474 	intel_psr_activate(intel_dp);
1475 
1476 unlock:
1477 	mutex_unlock(&psr->lock);
1478 }
1479 
1480 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1481 {
1482 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1483 		PSR2_MAN_TRK_CTL_ENABLE;
1484 }
1485 
1486 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1487 {
1488 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1489 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1490 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1491 }
1492 
1493 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1494 {
1495 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1496 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1497 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1498 }
1499 
1500 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1501 {
1502 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1503 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1504 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1505 }
1506 
1507 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1508 {
1509 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1510 
1511 	if (intel_dp->psr.psr2_sel_fetch_enabled)
1512 		intel_de_write(dev_priv,
1513 			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
1514 			       man_trk_ctl_enable_bit_get(dev_priv) |
1515 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
1516 			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1517 			       man_trk_ctl_continuos_full_frame(dev_priv));
1518 
1519 	/*
1520 	 * Display WA #0884: skl+
1521 	 * This documented WA for bxt can be safely applied
1522 	 * broadly so we can force HW tracking to exit PSR
1523 	 * instead of disabling and re-enabling.
1524 	 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
1525 	 * but it makes more sense write to the current active
1526 	 * pipe.
1527 	 *
1528 	 * This workaround do not exist for platforms with display 10 or newer
1529 	 * but testing proved that it works for up display 13, for newer
1530 	 * than that testing will be needed.
1531 	 */
1532 	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1533 }
1534 
1535 void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
1536 					    const struct intel_crtc_state *crtc_state)
1537 {
1538 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1539 	enum pipe pipe = plane->pipe;
1540 
1541 	if (!crtc_state->enable_psr2_sel_fetch)
1542 		return;
1543 
1544 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1545 }
1546 
1547 void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
1548 					    const struct intel_crtc_state *crtc_state,
1549 					    const struct intel_plane_state *plane_state)
1550 {
1551 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
1552 	enum pipe pipe = plane->pipe;
1553 
1554 	if (!crtc_state->enable_psr2_sel_fetch)
1555 		return;
1556 
1557 	if (plane->id == PLANE_CURSOR)
1558 		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1559 				  plane_state->ctl);
1560 	else
1561 		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1562 				  PLANE_SEL_FETCH_CTL_ENABLE);
1563 }
1564 
1565 void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
1566 					      const struct intel_crtc_state *crtc_state,
1567 					      const struct intel_plane_state *plane_state,
1568 					      int color_plane)
1569 {
1570 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1571 	enum pipe pipe = plane->pipe;
1572 	const struct drm_rect *clip;
1573 	u32 val;
1574 	int x, y;
1575 
1576 	if (!crtc_state->enable_psr2_sel_fetch)
1577 		return;
1578 
1579 	if (plane->id == PLANE_CURSOR)
1580 		return;
1581 
1582 	clip = &plane_state->psr2_sel_fetch_area;
1583 
1584 	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1585 	val |= plane_state->uapi.dst.x1;
1586 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1587 
1588 	x = plane_state->view.color_plane[color_plane].x;
1589 
1590 	/*
1591 	 * From Bspec: UV surface Start Y Position = half of Y plane Y
1592 	 * start position.
1593 	 */
1594 	if (!color_plane)
1595 		y = plane_state->view.color_plane[color_plane].y + clip->y1;
1596 	else
1597 		y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1598 
1599 	val = y << 16 | x;
1600 
1601 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1602 			  val);
1603 
1604 	/* Sizes are 0 based */
1605 	val = (drm_rect_height(clip) - 1) << 16;
1606 	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1607 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1608 }
1609 
1610 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1611 {
1612 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1613 	struct intel_encoder *encoder;
1614 
1615 	if (!crtc_state->enable_psr2_sel_fetch)
1616 		return;
1617 
1618 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1619 					     crtc_state->uapi.encoder_mask) {
1620 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1621 
1622 		lockdep_assert_held(&intel_dp->psr.lock);
1623 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1624 			return;
1625 		break;
1626 	}
1627 
1628 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
1629 		       crtc_state->psr2_man_track_ctl);
1630 }
1631 
1632 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1633 				  struct drm_rect *clip, bool full_update)
1634 {
1635 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1636 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1637 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1638 
1639 	/* SF partial frame enable has to be set even on full update */
1640 	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1641 
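	/*
	 * On a full update, program both the single full frame (SFF) and
	 * continuous full frame (CFF) bits so the complete frame keeps
	 * being fetched.
	 */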
1642 	if (full_update) {
1643 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1644 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
1645 		goto exit;
1646 	}
1647 
1648 	if (clip->y1 == -1)
1649 		goto exit;
1650 
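	/*
	 * ADL-P and display 14+ program the SU region in raw scanlines with
	 * an inclusive end address, hence the -1; older platforms use 4-line
	 * blocks with 1-based addressing.
	 */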
1651 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1652 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1653 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1654 	} else {
1655 		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1656 
1657 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1658 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1659 	}
1660 exit:
1661 	crtc_state->psr2_man_track_ctl = val;
1662 }
1663 
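/*
 * Grow @overlap_damage_area to also cover @damage_area, clipped against
 * @pipe_src. A y1 of -1 denotes an empty area.
 */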
1664 static void clip_area_update(struct drm_rect *overlap_damage_area,
1665 			     struct drm_rect *damage_area,
1666 			     struct drm_rect *pipe_src)
1667 {
1668 	if (!drm_rect_intersect(damage_area, pipe_src))
1669 		return;
1670 
1671 	if (overlap_damage_area->y1 == -1) {
1672 		overlap_damage_area->y1 = damage_area->y1;
1673 		overlap_damage_area->y2 = damage_area->y2;
1674 		return;
1675 	}
1676 
1677 	if (damage_area->y1 < overlap_damage_area->y1)
1678 		overlap_damage_area->y1 = damage_area->y1;
1679 
1680 	if (damage_area->y2 > overlap_damage_area->y2)
1681 		overlap_damage_area->y2 = damage_area->y2;
1682 }
1683 
1684 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1685 						struct drm_rect *pipe_clip)
1686 {
1687 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1688 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1689 	u16 y_alignment;
1690 
	/* ADLP and display 14+ align the SU region to the vdsc slice height when DSC is enabled */
1692 	if (crtc_state->dsc.compression_enable &&
1693 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
1694 		y_alignment = vdsc_cfg->slice_height;
1695 	else
1696 		y_alignment = crtc_state->su_y_granularity;
1697 
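	/*
	 * Round y1 down and y2 up to the alignment, e.g. with a granularity
	 * of 4 a clip of (y1=5, y2=10) becomes (y1=4, y2=12).
	 */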
1698 	pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1699 	if (pipe_clip->y2 % y_alignment)
1700 		pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
1701 }
1702 
1703 /*
 * TODO: It is not clear how to handle planes with a negative position;
 * planes are also not updated if they have a negative X position, so for
 * now do a full update in those cases.
 *
 * Plane scaling and rotation are not supported by selective fetch and both
 * properties can change without a modeset, so they need to be checked at
 * every atomic commit.
1711  */
1712 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1713 {
1714 	if (plane_state->uapi.dst.y1 < 0 ||
1715 	    plane_state->uapi.dst.x1 < 0 ||
1716 	    plane_state->scaler_id >= 0 ||
1717 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1718 		return false;
1719 
1720 	return true;
1721 }
1722 
1723 /*
 * Check for pipe properties that are not supported by selective fetch.
 *
 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is
 * executed after intel_psr_compute_config(), so for now keep PSR2
 * selective fetch enabled and go down the full update path.
1729  */
1730 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1731 {
1732 	if (crtc_state->scaler_state.scaler_id >= 0)
1733 		return false;
1734 
1735 	return true;
1736 }
1737 
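/*
 * Compute the per-plane selective fetch areas and the manual tracking
 * value for @crtc, falling back to a full frame update whenever selective
 * fetch cannot be used.
 */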
1738 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1739 				struct intel_crtc *crtc)
1740 {
1741 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1742 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1743 	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1744 	struct intel_plane_state *new_plane_state, *old_plane_state;
1745 	struct intel_plane *plane;
1746 	bool full_update = false;
1747 	int i, ret;
1748 
1749 	if (!crtc_state->enable_psr2_sel_fetch)
1750 		return 0;
1751 
1752 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
1753 		full_update = true;
1754 		goto skip_sel_fetch_set_loop;
1755 	}
1756 
1757 	/*
	 * Calculate the minimal selective fetch area of each plane and
	 * calculate the pipe damaged area.
	 * In the next loop the plane selective fetch area will actually be
	 * set using the whole pipe damaged area.
1762 	 */
1763 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1764 					     new_plane_state, i) {
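		/*
		 * damaged_area starts out empty (y1 == -1) and spans the
		 * full pipe width; only the y range matters for the SU
		 * region.
		 */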
1765 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
1766 						      .x2 = INT_MAX };
1767 
1768 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
1769 			continue;
1770 
1771 		if (!new_plane_state->uapi.visible &&
1772 		    !old_plane_state->uapi.visible)
1773 			continue;
1774 
1775 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
1776 			full_update = true;
1777 			break;
1778 		}
1779 
1780 		/*
		 * If the visibility changed or the plane moved, mark the
		 * whole plane area as damaged as it needs to be redrawn
		 * completely in both the old and new positions.
1784 		 */
1785 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
1786 		    !drm_rect_equals(&new_plane_state->uapi.dst,
1787 				     &old_plane_state->uapi.dst)) {
1788 			if (old_plane_state->uapi.visible) {
1789 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
1790 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
1791 				clip_area_update(&pipe_clip, &damaged_area,
1792 						 &crtc_state->pipe_src);
1793 			}
1794 
1795 			if (new_plane_state->uapi.visible) {
1796 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
1797 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
1798 				clip_area_update(&pipe_clip, &damaged_area,
1799 						 &crtc_state->pipe_src);
1800 			}
1801 			continue;
1802 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
1803 			/* If alpha changed mark the whole plane area as damaged */
1804 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
1805 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
1806 			clip_area_update(&pipe_clip, &damaged_area,
1807 					 &crtc_state->pipe_src);
1808 			continue;
1809 		}
1810 
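		/*
		 * uapi.src is in 16.16 fixed point; convert it to integer
		 * pixels before using it to translate the damage rect.
		 */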
1811 		src = drm_plane_state_src(&new_plane_state->uapi);
1812 		drm_rect_fp_to_int(&src, &src);
1813 
1814 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
1815 						     &new_plane_state->uapi, &damaged_area))
1816 			continue;
1817 
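		/*
		 * Translate the merged damage rect from framebuffer
		 * coordinates to pipe coordinates.
		 */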
1818 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
1819 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
1820 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
1821 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
1822 
1823 		clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
1824 	}
1825 
1826 	/*
1827 	 * TODO: For now we are just using full update in case
1828 	 * selective fetch area calculation fails. To optimize this we
1829 	 * should identify cases where this happens and fix the area
1830 	 * calculation for those.
1831 	 */
1832 	if (pipe_clip.y1 == -1) {
1833 		drm_info_once(&dev_priv->drm,
1834 			      "Selective fetch area calculation failed in pipe %c\n",
1835 			      pipe_name(crtc->pipe));
1836 		full_update = true;
1837 	}
1838 
1839 	if (full_update)
1840 		goto skip_sel_fetch_set_loop;
1841 
1842 	/* Wa_14014971492 */
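	/*
	 * When the splitter is enabled the SU region has to start at the
	 * first scanline, so extend the clip to the top of the pipe.
	 */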
1843 	if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1844 	     IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
1845 	    crtc_state->splitter.enable)
1846 		pipe_clip.y1 = 0;
1847 
1848 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
1849 	if (ret)
1850 		return ret;
1851 
1852 	intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
1853 
1854 	/*
	 * Now that we have the pipe damaged area, check if it intersects
	 * with each plane; if it does, set the plane selective fetch area.
1857 	 */
1858 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1859 					     new_plane_state, i) {
1860 		struct drm_rect *sel_fetch_area, inter;
1861 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
1862 
1863 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
1864 		    !new_plane_state->uapi.visible)
1865 			continue;
1866 
1867 		inter = pipe_clip;
1868 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
1869 			continue;
1870 
1871 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
1872 			full_update = true;
1873 			break;
1874 		}
1875 
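		/*
		 * The selective fetch area is programmed relative to the
		 * plane, so subtract the plane's y offset from the pipe clip.
		 */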
1876 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
1877 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
1878 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
1879 		crtc_state->update_planes |= BIT(plane->id);
1880 
1881 		/*
		 * The sel_fetch_area is calculated for the UV plane. Use the
		 * same area for the Y plane as well.
1884 		 */
1885 		if (linked) {
1886 			struct intel_plane_state *linked_new_plane_state;
1887 			struct drm_rect *linked_sel_fetch_area;
1888 
1889 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
1890 			if (IS_ERR(linked_new_plane_state))
1891 				return PTR_ERR(linked_new_plane_state);
1892 
1893 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
1894 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
1895 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
1896 			crtc_state->update_planes |= BIT(linked->id);
1897 		}
1898 	}
1899 
1900 skip_sel_fetch_set_loop:
1901 	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
1902 	return 0;
1903 }
1904 
1905 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
1906 				struct intel_crtc *crtc)
1907 {
1908 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1909 	const struct intel_crtc_state *old_crtc_state =
1910 		intel_atomic_get_old_crtc_state(state, crtc);
1911 	const struct intel_crtc_state *new_crtc_state =
1912 		intel_atomic_get_new_crtc_state(state, crtc);
1913 	struct intel_encoder *encoder;
1914 
1915 	if (!HAS_PSR(i915))
1916 		return;
1917 
1918 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
1919 					     old_crtc_state->uapi.encoder_mask) {
1920 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1921 		struct intel_psr *psr = &intel_dp->psr;
1922 		bool needs_to_disable = false;
1923 
1924 		mutex_lock(&psr->lock);
1925 
1926 		/*
		 * Reasons to disable:
		 * - The CRTC needs a full modeset
		 * - PSR disabled in new state
		 * - All planes will go inactive
		 * - Changing between PSR versions
1931 		 */
1932 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
1933 		needs_to_disable |= !new_crtc_state->has_psr;
1934 		needs_to_disable |= !new_crtc_state->active_planes;
1935 		needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
1936 
1937 		if (psr->enabled && needs_to_disable)
1938 			intel_psr_disable_locked(intel_dp);
1939 
1940 		mutex_unlock(&psr->lock);
1941 	}
1942 }
1943 
1944 static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
1945 					 const struct intel_crtc_state *crtc_state)
1946 {
1947 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1948 	struct intel_encoder *encoder;
1949 
1950 	if (!crtc_state->has_psr)
1951 		return;
1952 
1953 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
1954 					     crtc_state->uapi.encoder_mask) {
1955 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1956 		struct intel_psr *psr = &intel_dp->psr;
1957 
1958 		mutex_lock(&psr->lock);
1959 
1960 		if (psr->sink_not_reliable)
1961 			goto exit;
1962 
1963 		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
1964 
		/* Only enable if there are active planes */
1966 		if (!psr->enabled && crtc_state->active_planes)
1967 			intel_psr_enable_locked(intel_dp, crtc_state);
1968 
1969 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
1970 		if (crtc_state->crc_enabled && psr->enabled)
1971 			psr_force_hw_tracking_exit(intel_dp);
1972 
1973 exit:
1974 		mutex_unlock(&psr->lock);
1975 	}
1976 }
1977 
1978 void intel_psr_post_plane_update(const struct intel_atomic_state *state)
1979 {
1980 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1981 	struct intel_crtc_state *crtc_state;
1982 	struct intel_crtc *crtc;
1983 	int i;
1984 
1985 	if (!HAS_PSR(dev_priv))
1986 		return;
1987 
1988 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
1989 		_intel_psr_post_plane_update(state, crtc_state);
1990 }
1991 
1992 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
1993 {
1994 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1995 
1996 	/*
1997 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
	 * As all higher states have bit 4 of the PSR2 state set, we can just
	 * wait for EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2000 	 */
2001 	return intel_de_wait_for_clear(dev_priv,
2002 				       EDP_PSR2_STATUS(intel_dp->psr.transcoder),
2003 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2004 }
2005 
2006 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2007 {
2008 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2009 
2010 	/*
2011 	 * From bspec: Panel Self Refresh (BDW+)
2012 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2013 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2014 	 * defensive enough to cover everything.
2015 	 */
2016 	return intel_de_wait_for_clear(dev_priv,
2017 				       EDP_PSR_STATUS(intel_dp->psr.transcoder),
2018 				       EDP_PSR_STATUS_STATE_MASK, 50);
2019 }
2020 
2021 /**
 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2023  * @new_crtc_state: new CRTC state
2024  *
2025  * This function is expected to be called from pipe_update_start() where it is
2026  * not expected to race with PSR enable or disable.
2027  */
2028 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2029 {
2030 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2031 	struct intel_encoder *encoder;
2032 
2033 	if (!new_crtc_state->has_psr)
2034 		return;
2035 
2036 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2037 					     new_crtc_state->uapi.encoder_mask) {
2038 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2039 		int ret;
2040 
2041 		lockdep_assert_held(&intel_dp->psr.lock);
2042 
2043 		if (!intel_dp->psr.enabled)
2044 			continue;
2045 
2046 		if (intel_dp->psr.psr2_enabled)
2047 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2048 		else
2049 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2050 
2051 		if (ret)
2052 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2053 	}
2054 }
2055 
2056 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2057 {
2058 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2059 	i915_reg_t reg;
2060 	u32 mask;
2061 	int err;
2062 
2063 	if (!intel_dp->psr.enabled)
2064 		return false;
2065 
2066 	if (intel_dp->psr.psr2_enabled) {
2067 		reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
2068 		mask = EDP_PSR2_STATUS_STATE_MASK;
2069 	} else {
2070 		reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
2071 		mask = EDP_PSR_STATUS_STATE_MASK;
2072 	}
2073 
2074 	mutex_unlock(&intel_dp->psr.lock);
2075 
2076 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2077 	if (err)
2078 		drm_err(&dev_priv->drm,
2079 			"Timed out waiting for PSR Idle for re-enable\n");
2080 
2081 	/* After the unlocked wait, verify that PSR is still wanted! */
2082 	mutex_lock(&intel_dp->psr.lock);
2083 	return err == 0 && intel_dp->psr.enabled;
2084 }
2085 
2086 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2087 {
2088 	struct drm_connector_list_iter conn_iter;
2089 	struct drm_modeset_acquire_ctx ctx;
2090 	struct drm_atomic_state *state;
2091 	struct drm_connector *conn;
2092 	int err = 0;
2093 
2094 	state = drm_atomic_state_alloc(&dev_priv->drm);
2095 	if (!state)
2096 		return -ENOMEM;
2097 
2098 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2099 	state->acquire_ctx = &ctx;
2100 
2101 retry:
2102 
2103 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2104 	drm_for_each_connector_iter(conn, &conn_iter) {
2105 		struct drm_connector_state *conn_state;
2106 		struct drm_crtc_state *crtc_state;
2107 
2108 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2109 			continue;
2110 
2111 		conn_state = drm_atomic_get_connector_state(state, conn);
2112 		if (IS_ERR(conn_state)) {
2113 			err = PTR_ERR(conn_state);
2114 			break;
2115 		}
2116 
2117 		if (!conn_state->crtc)
2118 			continue;
2119 
2120 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2121 		if (IS_ERR(crtc_state)) {
2122 			err = PTR_ERR(crtc_state);
2123 			break;
2124 		}
2125 
2126 		/* Mark mode as changed to trigger a pipe->update() */
2127 		crtc_state->mode_changed = true;
2128 	}
2129 	drm_connector_list_iter_end(&conn_iter);
2130 
2131 	if (err == 0)
2132 		err = drm_atomic_commit(state);
2133 
2134 	if (err == -EDEADLK) {
2135 		drm_atomic_state_clear(state);
2136 		err = drm_modeset_backoff(&ctx);
2137 		if (!err)
2138 			goto retry;
2139 	}
2140 
2141 	drm_modeset_drop_locks(&ctx);
2142 	drm_modeset_acquire_fini(&ctx);
2143 	drm_atomic_state_put(state);
2144 
2145 	return err;
2146 }
2147 
2148 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2149 {
2150 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2151 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2152 	u32 old_mode;
2153 	int ret;
2154 
2155 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2156 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2157 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2158 		return -EINVAL;
2159 	}
2160 
2161 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2162 	if (ret)
2163 		return ret;
2164 
2165 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2166 	intel_dp->psr.debug = val;
2167 
2168 	/*
2169 	 * Do it right away if it's already enabled, otherwise it will be done
2170 	 * when enabling the source.
2171 	 */
2172 	if (intel_dp->psr.enabled)
2173 		psr_irq_control(intel_dp);
2174 
2175 	mutex_unlock(&intel_dp->psr.lock);
2176 
2177 	if (old_mode != mode)
2178 		ret = intel_psr_fastset_force(dev_priv);
2179 
2180 	return ret;
2181 }
2182 
2183 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2184 {
2185 	struct intel_psr *psr = &intel_dp->psr;
2186 
2187 	intel_psr_disable_locked(intel_dp);
2188 	psr->sink_not_reliable = true;
	/* let's make sure that sink is awake */
2190 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2191 }
2192 
2193 static void intel_psr_work(struct work_struct *work)
2194 {
2195 	struct intel_dp *intel_dp =
2196 		container_of(work, typeof(*intel_dp), psr.work);
2197 
2198 	mutex_lock(&intel_dp->psr.lock);
2199 
2200 	if (!intel_dp->psr.enabled)
2201 		goto unlock;
2202 
2203 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2204 		intel_psr_handle_irq(intel_dp);
2205 
2206 	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it
	 * stays disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
2211 	 */
2212 	if (!__psr_wait_for_idle_locked(intel_dp))
2213 		goto unlock;
2214 
2215 	/*
2216 	 * The delayed work can race with an invalidate hence we need to
2217 	 * recheck. Since psr_flush first clears this and then reschedules we
2218 	 * won't ever miss a flush when bailing out here.
2219 	 */
2220 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2221 		goto unlock;
2222 
2223 	intel_psr_activate(intel_dp);
2224 unlock:
2225 	mutex_unlock(&intel_dp->psr.lock);
2226 }
2227 
2228 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2229 {
2230 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2231 
2232 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2233 		u32 val;
2234 
2235 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
			/* Send one update, otherwise lag is observed on screen */
2237 			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2238 			return;
2239 		}
2240 
2241 		val = man_trk_ctl_enable_bit_get(dev_priv) |
2242 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
2243 		      man_trk_ctl_continuos_full_frame(dev_priv);
2244 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val);
2245 		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2246 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2247 	} else {
2248 		intel_psr_exit(intel_dp);
2249 	}
2250 }
2251 
2252 /**
2253  * intel_psr_invalidate - Invalidate PSR
2254  * @dev_priv: i915 device
2255  * @frontbuffer_bits: frontbuffer plane tracking bits
2256  * @origin: which operation caused the invalidate
2257  *
2258  * Since the hardware frontbuffer tracking has gaps we need to integrate
2259  * with the software frontbuffer tracking. This function gets called every
2260  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2261  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2262  *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2264  */
2265 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2266 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
2267 {
2268 	struct intel_encoder *encoder;
2269 
2270 	if (origin == ORIGIN_FLIP)
2271 		return;
2272 
2273 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2274 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2275 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2276 
2277 		mutex_lock(&intel_dp->psr.lock);
2278 		if (!intel_dp->psr.enabled) {
2279 			mutex_unlock(&intel_dp->psr.lock);
2280 			continue;
2281 		}
2282 
2283 		pipe_frontbuffer_bits &=
2284 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2285 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2286 
2287 		if (pipe_frontbuffer_bits)
2288 			_psr_invalidate_handle(intel_dp);
2289 
2290 		mutex_unlock(&intel_dp->psr.lock);
2291 	}
2292 }

/*
 * When we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
 * event as well, therefore tgl_dc3co_flush_locked() will need to be
 * changed accordingly.
2298  */
2299 static void
2300 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2301 		       enum fb_op_origin origin)
2302 {
2303 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2304 	    !intel_dp->psr.active)
2305 		return;
2306 
2307 	/*
	 * Every frontbuffer flush caused by a flip event modifies the delay
	 * of the delayed work; when the delayed work finally runs, it means
	 * the display has been idle.
2310 	 */
2311 	if (!(frontbuffer_bits &
2312 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2313 		return;
2314 
2315 	tgl_psr2_enable_dc3co(intel_dp);
2316 	mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
2317 			 intel_dp->psr.dc3co_exit_delay);
2318 }
2319 
2320 static void _psr_flush_handle(struct intel_dp *intel_dp)
2321 {
2322 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2323 
2324 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2325 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2326 			/* can we turn CFF off? */
2327 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2328 				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2329 					man_trk_ctl_partial_frame_bit_get(dev_priv) |
2330 					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2331 					man_trk_ctl_continuos_full_frame(dev_priv);
2332 
2333 				/*
				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
				 * updates. Still keep the CFF bit enabled as we don't have a
				 * proper SU configuration in case an update is sent for any
				 * reason after the SFF bit gets cleared by the HW on the next
				 * vblank.
2338 				 */
2339 				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
2340 					       val);
2341 				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2342 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2343 			}
2344 		} else {
2345 			/*
			 * Continuous full frame is disabled; only a single
			 * full frame is required.
2348 			 */
2349 			psr_force_hw_tracking_exit(intel_dp);
2350 		}
2351 	} else {
2352 		psr_force_hw_tracking_exit(intel_dp);
2353 
2354 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2355 			schedule_work(&intel_dp->psr.work);
2356 	}
2357 }
2358 
2359 /**
2360  * intel_psr_flush - Flush PSR
2361  * @dev_priv: i915 device
2362  * @frontbuffer_bits: frontbuffer plane tracking bits
2363  * @origin: which operation caused the flush
2364  *
2365  * Since the hardware frontbuffer tracking has gaps we need to integrate
2366  * with the software frontbuffer tracking. This function gets called every
2367  * time frontbuffer rendering has completed and flushed out to memory. PSR
2368  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2369  *
2370  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2371  */
2372 void intel_psr_flush(struct drm_i915_private *dev_priv,
2373 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
2374 {
2375 	struct intel_encoder *encoder;
2376 
2377 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2378 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2379 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2380 
2381 		mutex_lock(&intel_dp->psr.lock);
2382 		if (!intel_dp->psr.enabled) {
2383 			mutex_unlock(&intel_dp->psr.lock);
2384 			continue;
2385 		}
2386 
2387 		pipe_frontbuffer_bits &=
2388 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2389 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2390 
2391 		/*
2392 		 * If the PSR is paused by an explicit intel_psr_paused() call,
2393 		 * we have to ensure that the PSR is not activated until
2394 		 * intel_psr_resume() is called.
2395 		 */
2396 		if (intel_dp->psr.paused)
2397 			goto unlock;
2398 
2399 		if (origin == ORIGIN_FLIP ||
2400 		    (origin == ORIGIN_CURSOR_UPDATE &&
2401 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
2402 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2403 			goto unlock;
2404 		}
2405 
2406 		if (pipe_frontbuffer_bits == 0)
2407 			goto unlock;
2408 
2409 		/* By definition flush = invalidate + flush */
2410 		_psr_flush_handle(intel_dp);
2411 unlock:
2412 		mutex_unlock(&intel_dp->psr.lock);
2413 	}
2414 }
2415 
2416 /**
2417  * intel_psr_init - Init basic PSR work and mutex.
2418  * @intel_dp: Intel DP
2419  *
 * This function is called after connector initialization (which handles
 * the connector capabilities) and initializes the basic PSR state for
 * each DP encoder.
2423  */
2424 void intel_psr_init(struct intel_dp *intel_dp)
2425 {
2426 	struct intel_connector *connector = intel_dp->attached_connector;
2427 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2428 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2429 
2430 	if (!HAS_PSR(dev_priv))
2431 		return;
2432 
2433 	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder,
	 * but BDW, GEN9 and GEN11 are not validated by the HW team on
	 * transcoders other than the eDP one.
	 * For now only one instance of PSR is supported on BDW, GEN9 and
	 * GEN11, so keep it hardcoded to PORT_A for those.
	 * GEN12 onwards supports an instance of PSR registers per transcoder.
2441 	 */
2442 	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2443 		drm_dbg_kms(&dev_priv->drm,
2444 			    "PSR condition failed: Port not supported\n");
2445 		return;
2446 	}
2447 
2448 	intel_dp->psr.source_support = true;
2449 
	/* Set link_standby vs. link_off defaults */
	if (DISPLAY_VER(dev_priv) < 12)
		/* For platforms up to TGL, respect the VBT setting again */
2453 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2454 
2455 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2456 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2457 	mutex_init(&intel_dp->psr.lock);
2458 }
2459 
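/*
 * Read the sink's PSR status and error status over DPCD; returns 0 on
 * success, with @status masked down to the sink state bits, or the
 * failing AUX read result otherwise.
 */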
2460 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2461 					   u8 *status, u8 *error_status)
2462 {
2463 	struct drm_dp_aux *aux = &intel_dp->aux;
2464 	int ret;
2465 
2466 	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2467 	if (ret != 1)
2468 		return ret;
2469 
2470 	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2471 	if (ret != 1)
2472 		return ret;
2473 
2474 	*status = *status & DP_PSR_SINK_STATE_MASK;
2475 
2476 	return 0;
2477 }
2478 
2479 static void psr_alpm_check(struct intel_dp *intel_dp)
2480 {
2481 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2482 	struct drm_dp_aux *aux = &intel_dp->aux;
2483 	struct intel_psr *psr = &intel_dp->psr;
2484 	u8 val;
2485 	int r;
2486 
2487 	if (!psr->psr2_enabled)
2488 		return;
2489 
2490 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2491 	if (r != 1) {
2492 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2493 		return;
2494 	}
2495 
2496 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2497 		intel_psr_disable_locked(intel_dp);
2498 		psr->sink_not_reliable = true;
2499 		drm_dbg_kms(&dev_priv->drm,
2500 			    "ALPM lock timeout error, disabling PSR\n");
2501 
2502 		/* Clearing error */
2503 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2504 	}
2505 }
2506 
2507 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2508 {
2509 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2510 	struct intel_psr *psr = &intel_dp->psr;
2511 	u8 val;
2512 	int r;
2513 
2514 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2515 	if (r != 1) {
2516 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2517 		return;
2518 	}
2519 
2520 	if (val & DP_PSR_CAPS_CHANGE) {
2521 		intel_psr_disable_locked(intel_dp);
2522 		psr->sink_not_reliable = true;
2523 		drm_dbg_kms(&dev_priv->drm,
2524 			    "Sink PSR capability changed, disabling PSR\n");
2525 
2526 		/* Clearing it */
2527 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2528 	}
2529 }
2530 
2531 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2532 {
2533 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2534 	struct intel_psr *psr = &intel_dp->psr;
2535 	u8 status, error_status;
2536 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2537 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2538 			  DP_PSR_LINK_CRC_ERROR;
2539 
2540 	if (!CAN_PSR(intel_dp))
2541 		return;
2542 
2543 	mutex_lock(&psr->lock);
2544 
2545 	if (!psr->enabled)
2546 		goto exit;
2547 
2548 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2549 		drm_err(&dev_priv->drm,
2550 			"Error reading PSR status or error status\n");
2551 		goto exit;
2552 	}
2553 
2554 	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2555 		intel_psr_disable_locked(intel_dp);
2556 		psr->sink_not_reliable = true;
2557 	}
2558 
2559 	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2560 		drm_dbg_kms(&dev_priv->drm,
2561 			    "PSR sink internal error, disabling PSR\n");
2562 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2563 		drm_dbg_kms(&dev_priv->drm,
2564 			    "PSR RFB storage error, disabling PSR\n");
2565 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2566 		drm_dbg_kms(&dev_priv->drm,
2567 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
2568 	if (error_status & DP_PSR_LINK_CRC_ERROR)
2569 		drm_dbg_kms(&dev_priv->drm,
2570 			    "PSR Link CRC error, disabling PSR\n");
2571 
2572 	if (error_status & ~errors)
2573 		drm_err(&dev_priv->drm,
2574 			"PSR_ERROR_STATUS unhandled errors %x\n",
2575 			error_status & ~errors);
2576 	/* clear status register */
2577 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2578 
2579 	psr_alpm_check(intel_dp);
2580 	psr_capability_changed_check(intel_dp);
2581 
2582 exit:
2583 	mutex_unlock(&psr->lock);
2584 }
2585 
2586 bool intel_psr_enabled(struct intel_dp *intel_dp)
2587 {
2588 	bool ret;
2589 
2590 	if (!CAN_PSR(intel_dp))
2591 		return false;
2592 
2593 	mutex_lock(&intel_dp->psr.lock);
2594 	ret = intel_dp->psr.enabled;
2595 	mutex_unlock(&intel_dp->psr.lock);
2596 
2597 	return ret;
2598 }
2599 
2600 /**
2601  * intel_psr_lock - grab PSR lock
2602  * @crtc_state: the crtc state
2603  *
 * This is initially meant to be used around the CRTC update, when
 * vblank-sensitive registers are updated and we need to grab the lock
 * before that to avoid vblank evasion.
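 *
 * A minimal usage sketch:
 *	intel_psr_lock(new_crtc_state);
 *	... update vblank sensitive registers ...
 *	intel_psr_unlock(new_crtc_state);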
2607  */
2608 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2609 {
2610 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2611 	struct intel_encoder *encoder;
2612 
2613 	if (!crtc_state->has_psr)
2614 		return;
2615 
2616 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2617 					     crtc_state->uapi.encoder_mask) {
2618 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2619 
2620 		mutex_lock(&intel_dp->psr.lock);
2621 		break;
2622 	}
2623 }
2624 
2625 /**
2626  * intel_psr_unlock - release PSR lock
2627  * @crtc_state: the crtc state
2628  *
2629  * Release the PSR lock that was held during pipe update.
2630  */
2631 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2632 {
2633 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2634 	struct intel_encoder *encoder;
2635 
2636 	if (!crtc_state->has_psr)
2637 		return;
2638 
2639 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2640 					     crtc_state->uapi.encoder_mask) {
2641 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2642 
2643 		mutex_unlock(&intel_dp->psr.lock);
2644 		break;
2645 	}
2646 }
2647 
2648 static void
2649 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
2650 {
2651 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2652 	const char *status = "unknown";
2653 	u32 val, status_val;
2654 
2655 	if (intel_dp->psr.psr2_enabled) {
2656 		static const char * const live_status[] = {
2657 			"IDLE",
2658 			"CAPTURE",
2659 			"CAPTURE_FS",
2660 			"SLEEP",
2661 			"BUFON_FW",
2662 			"ML_UP",
2663 			"SU_STANDBY",
2664 			"FAST_SLEEP",
2665 			"DEEP_SLEEP",
2666 			"BUF_ON",
2667 			"TG_ON"
2668 		};
2669 		val = intel_de_read(dev_priv,
2670 				    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
2671 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
2672 		if (status_val < ARRAY_SIZE(live_status))
2673 			status = live_status[status_val];
2674 	} else {
2675 		static const char * const live_status[] = {
2676 			"IDLE",
2677 			"SRDONACK",
2678 			"SRDENT",
2679 			"BUFOFF",
2680 			"BUFON",
2681 			"AUXACK",
2682 			"SRDOFFACK",
2683 			"SRDENT_ON",
2684 		};
2685 		val = intel_de_read(dev_priv,
2686 				    EDP_PSR_STATUS(intel_dp->psr.transcoder));
2687 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2688 			      EDP_PSR_STATUS_STATE_SHIFT;
2689 		if (status_val < ARRAY_SIZE(live_status))
2690 			status = live_status[status_val];
2691 	}
2692 
2693 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2694 }
2695 
2696 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
2697 {
2698 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2699 	struct intel_psr *psr = &intel_dp->psr;
2700 	intel_wakeref_t wakeref;
2701 	const char *status;
2702 	bool enabled;
2703 	u32 val;
2704 
2705 	seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
2706 	if (psr->sink_support)
2707 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
2708 	seq_puts(m, "\n");
2709 
2710 	if (!psr->sink_support)
2711 		return 0;
2712 
2713 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2714 	mutex_lock(&psr->lock);
2715 
2716 	if (psr->enabled)
2717 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2718 	else
2719 		status = "disabled";
2720 	seq_printf(m, "PSR mode: %s\n", status);
2721 
2722 	if (!psr->enabled) {
2723 		seq_printf(m, "PSR sink not reliable: %s\n",
2724 			   str_yes_no(psr->sink_not_reliable));
2725 
2726 		goto unlock;
2727 	}
2728 
2729 	if (psr->psr2_enabled) {
2730 		val = intel_de_read(dev_priv,
2731 				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
2732 		enabled = val & EDP_PSR2_ENABLE;
2733 	} else {
2734 		val = intel_de_read(dev_priv,
2735 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
2736 		enabled = val & EDP_PSR_ENABLE;
2737 	}
2738 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2739 		   str_enabled_disabled(enabled), val);
2740 	psr_source_status(intel_dp, m);
2741 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2742 		   psr->busy_frontbuffer_bits);
2743 
	/* SKL+ Perf counter is reset to 0 every time a DC state is entered */
2747 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2748 		val = intel_de_read(dev_priv,
2749 				    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
2750 		val &= EDP_PSR_PERF_CNT_MASK;
2751 		seq_printf(m, "Performance counter: %u\n", val);
2752 	}
2753 
2754 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
2755 		seq_printf(m, "Last attempted entry at: %lld\n",
2756 			   psr->last_entry_attempt);
2757 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2758 	}
2759 
2760 	if (psr->psr2_enabled) {
2761 		u32 su_frames_val[3];
2762 		int frame;
2763 
2764 		/*
		 * Reading all 3 registers beforehand to minimize crossing a
		 * frame boundary between register reads.
2767 		 */
2768 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
2769 			val = intel_de_read(dev_priv,
2770 					    PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
2771 			su_frames_val[frame / 3] = val;
2772 		}
2773 
2774 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2775 
2776 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2777 			u32 su_blocks;
2778 
2779 			su_blocks = su_frames_val[frame / 3] &
2780 				    PSR2_SU_STATUS_MASK(frame);
2781 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2782 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
2783 		}
2784 
2785 		seq_printf(m, "PSR2 selective fetch: %s\n",
2786 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
2787 	}
2788 
2789 unlock:
2790 	mutex_unlock(&psr->lock);
2791 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2792 
2793 	return 0;
2794 }
2795 
2796 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
2797 {
2798 	struct drm_i915_private *dev_priv = m->private;
2799 	struct intel_dp *intel_dp = NULL;
2800 	struct intel_encoder *encoder;
2801 
2802 	if (!HAS_PSR(dev_priv))
2803 		return -ENODEV;
2804 
	/* Find the first eDP which supports PSR */
2806 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2807 		intel_dp = enc_to_intel_dp(encoder);
2808 		break;
2809 	}
2810 
2811 	if (!intel_dp)
2812 		return -ENODEV;
2813 
2814 	return intel_psr_status(m, intel_dp);
2815 }
2816 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
2817 
2818 static int
2819 i915_edp_psr_debug_set(void *data, u64 val)
2820 {
2821 	struct drm_i915_private *dev_priv = data;
2822 	struct intel_encoder *encoder;
2823 	intel_wakeref_t wakeref;
2824 	int ret = -ENODEV;
2825 
2826 	if (!HAS_PSR(dev_priv))
2827 		return ret;
2828 
2829 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2830 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2831 
2832 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
2833 
2834 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2835 
2836 		// TODO: split to each transcoder's PSR debug state
2837 		ret = intel_psr_debug_set(intel_dp, val);
2838 
2839 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2840 	}
2841 
2842 	return ret;
2843 }
2844 
2845 static int
2846 i915_edp_psr_debug_get(void *data, u64 *val)
2847 {
2848 	struct drm_i915_private *dev_priv = data;
2849 	struct intel_encoder *encoder;
2850 
2851 	if (!HAS_PSR(dev_priv))
2852 		return -ENODEV;
2853 
2854 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2855 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2856 
2857 		// TODO: split to each transcoder's PSR debug state
2858 		*val = READ_ONCE(intel_dp->psr.debug);
2859 		return 0;
2860 	}
2861 
2862 	return -ENODEV;
2863 }
2864 
2865 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2866 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2867 			"%llu\n");
2868 
2869 void intel_psr_debugfs_register(struct drm_i915_private *i915)
2870 {
2871 	struct drm_minor *minor = i915->drm.primary;
2872 
2873 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
2874 			    i915, &i915_edp_psr_debug_fops);
2875 
2876 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
2877 			    i915, &i915_edp_psr_status_fops);
2878 }
2879 
2880 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2881 {
2882 	struct intel_connector *connector = m->private;
2883 	struct intel_dp *intel_dp = intel_attached_dp(connector);
2884 	static const char * const sink_status[] = {
2885 		"inactive",
2886 		"transition to active, capture and display",
2887 		"active, display from RFB",
2888 		"active, capture and display on sink device timings",
2889 		"transition to inactive, capture and display, timing re-sync",
2890 		"reserved",
2891 		"reserved",
2892 		"sink internal error",
2893 	};
2894 	const char *str;
2895 	int ret;
2896 	u8 val;
2897 
2898 	if (!CAN_PSR(intel_dp)) {
2899 		seq_puts(m, "PSR Unsupported\n");
2900 		return -ENODEV;
2901 	}
2902 
2903 	if (connector->base.status != connector_status_connected)
2904 		return -ENODEV;
2905 
2906 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2907 	if (ret != 1)
2908 		return ret < 0 ? ret : -EIO;
2909 
2910 	val &= DP_PSR_SINK_STATE_MASK;
2911 	if (val < ARRAY_SIZE(sink_status))
2912 		str = sink_status[val];
2913 	else
2914 		str = "unknown";
2915 
2916 	seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2917 
2918 	return 0;
2919 }
2920 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2921 
2922 static int i915_psr_status_show(struct seq_file *m, void *data)
2923 {
2924 	struct intel_connector *connector = m->private;
2925 	struct intel_dp *intel_dp = intel_attached_dp(connector);
2926 
2927 	return intel_psr_status(m, intel_dp);
2928 }
2929 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
2930 
2931 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
2932 {
2933 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2934 	struct dentry *root = connector->base.debugfs_entry;
2935 
2936 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
2937 		return;
2938 
2939 	debugfs_create_file("i915_psr_sink_status", 0444, root,
2940 			    connector, &i915_psr_sink_status_fops);
2941 
2942 	if (HAS_PSR(i915))
2943 		debugfs_create_file("i915_psr_status", 0444, root,
2944 				    connector, &i915_psr_status_fops);
2945 }
2946