1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 
27 #include "i915_drv.h"
28 #include "i915_reg.h"
29 #include "intel_atomic.h"
30 #include "intel_crtc.h"
31 #include "intel_de.h"
32 #include "intel_display_types.h"
33 #include "intel_dp.h"
34 #include "intel_dp_aux.h"
35 #include "intel_hdmi.h"
36 #include "intel_psr.h"
37 #include "intel_snps_phy.h"
38 #include "skl_universal_plane.h"
39 
40 /**
41  * DOC: Panel Self Refresh (PSR/SRD)
42  *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as the
 * frame buffer for that display is unchanged.
49  *
50  * Panel Self Refresh must be supported by both Hardware (source) and
51  * Panel (sink).
52  *
53  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
54  * to power down the link and memory controller. For DSI panels the same idea
55  * is called "manual mode".
56  *
57  * The implementation uses the hardware-based PSR support which automatically
58  * enters/exits self-refresh mode. The hardware takes care of sending the
59  * required DP aux message and could even retrain the link (that part isn't
60  * enabled yet though). The hardware also keeps track of any frontbuffer
61  * changes to know when to exit self-refresh mode again. Unfortunately that
62  * part doesn't work too well, hence why the i915 PSR support uses the
63  * software frontbuffer tracking to make sure it doesn't miss a screen
64  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
65  * get called by the frontbuffer tracking code. Note that because of locking
66  * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
68  *
69  * DC3CO (DC3 clock off)
70  *
 * On top of PSR2, GEN12 adds an intermediate power saving state that turns
 * the clock off automatically during the PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
 * in it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
 * after 6 frames. If no other flip occurs and that work executes, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep again, and the cycle
 * restarts on the next flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most modern systems will only
 * use page flips.
85  */
86 
87 static bool psr_global_enabled(struct intel_dp *intel_dp)
88 {
89 	struct intel_connector *connector = intel_dp->attached_connector;
90 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
91 
92 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
93 	case I915_PSR_DEBUG_DEFAULT:
94 		if (i915->params.enable_psr == -1)
95 			return connector->panel.vbt.psr.enable;
96 		return i915->params.enable_psr;
97 	case I915_PSR_DEBUG_DISABLE:
98 		return false;
99 	default:
100 		return true;
101 	}
102 }
103 
104 static bool psr2_global_enabled(struct intel_dp *intel_dp)
105 {
106 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
107 
108 	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
109 	case I915_PSR_DEBUG_DISABLE:
110 	case I915_PSR_DEBUG_FORCE_PSR1:
111 		return false;
112 	default:
113 		if (i915->params.enable_psr == 1)
114 			return false;
115 		return true;
116 	}
117 }
118 
119 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
120 {
121 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
122 
123 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
124 		EDP_PSR_ERROR(intel_dp->psr.transcoder);
125 }
126 
127 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
128 {
129 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
130 
131 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
132 		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
133 }
134 
135 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
136 {
137 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
138 
139 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
140 		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
141 }
142 
143 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
144 {
145 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
146 
147 	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
148 		EDP_PSR_MASK(intel_dp->psr.transcoder);
149 }
150 
151 static void psr_irq_control(struct intel_dp *intel_dp)
152 {
153 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
154 	i915_reg_t imr_reg;
155 	u32 mask;
156 
157 	if (DISPLAY_VER(dev_priv) >= 12)
158 		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
159 	else
160 		imr_reg = EDP_PSR_IMR;
161 
162 	mask = psr_irq_psr_error_bit_get(intel_dp);
163 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
164 		mask |= psr_irq_post_exit_bit_get(intel_dp) |
165 			psr_irq_pre_entry_bit_get(intel_dp);
166 
167 	intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask);
168 }
169 
170 static void psr_event_print(struct drm_i915_private *i915,
171 			    u32 val, bool psr2_enabled)
172 {
173 	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
174 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
175 		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
176 	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
177 		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
178 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
179 		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
180 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
181 		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
182 	if (val & PSR_EVENT_GRAPHICS_RESET)
183 		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
184 	if (val & PSR_EVENT_PCH_INTERRUPT)
185 		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
186 	if (val & PSR_EVENT_MEMORY_UP)
187 		drm_dbg_kms(&i915->drm, "\tMemory up\n");
188 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
189 		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
190 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
191 		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
192 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
193 		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
194 	if (val & PSR_EVENT_REGISTER_UPDATE)
195 		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
196 	if (val & PSR_EVENT_HDCP_ENABLE)
197 		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
198 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
199 		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
200 	if (val & PSR_EVENT_VBI_ENABLE)
201 		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
202 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
203 		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
204 	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
205 		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
206 }
207 
208 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
209 {
210 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
211 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
212 	ktime_t time_ns =  ktime_get();
213 	i915_reg_t imr_reg;
214 
215 	if (DISPLAY_VER(dev_priv) >= 12)
216 		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
217 	else
218 		imr_reg = EDP_PSR_IMR;
219 
220 	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
221 		intel_dp->psr.last_entry_attempt = time_ns;
222 		drm_dbg_kms(&dev_priv->drm,
223 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
224 			    transcoder_name(cpu_transcoder));
225 	}
226 
227 	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
228 		intel_dp->psr.last_exit = time_ns;
229 		drm_dbg_kms(&dev_priv->drm,
230 			    "[transcoder %s] PSR exit completed\n",
231 			    transcoder_name(cpu_transcoder));
232 
233 		if (DISPLAY_VER(dev_priv) >= 9) {
234 			u32 val = intel_de_read(dev_priv,
235 						PSR_EVENT(cpu_transcoder));
236 			bool psr2_enabled = intel_dp->psr.psr2_enabled;
237 
238 			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
239 				       val);
240 			psr_event_print(dev_priv, val, psr2_enabled);
241 		}
242 	}
243 
244 	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
245 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
246 			 transcoder_name(cpu_transcoder));
247 
248 		intel_dp->psr.irq_aux_error = true;
249 
		/*
		 * If this interrupt is not masked it will keep firing so
		 * fast that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again,
		 * so we don't care about unmasking the interrupt or
		 * clearing irq_aux_error.
		 */
258 		intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
259 
260 		schedule_work(&intel_dp->psr.work);
261 	}
262 }
263 
264 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
265 {
266 	u8 alpm_caps = 0;
267 
268 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
269 			      &alpm_caps) != 1)
270 		return false;
271 	return alpm_caps & DP_ALPM_CAP;
272 }
273 
274 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
275 {
276 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
277 	u8 val = 8; /* assume the worst if we can't read the value */
278 
279 	if (drm_dp_dpcd_readb(&intel_dp->aux,
280 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
281 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
282 	else
283 		drm_dbg_kms(&i915->drm,
284 			    "Unable to get sink synchronization latency, assuming 8 frames\n");
285 	return val;
286 }
287 
288 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
289 {
290 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
291 	ssize_t r;
292 	u16 w;
293 	u8 y;
294 
	/* If the sink doesn't have specific granularity requirements, set legacy ones */
296 	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
297 		/* As PSR2 HW sends full lines, we do not care about x granularity */
298 		w = 4;
299 		y = 4;
300 		goto exit;
301 	}
302 
303 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
304 	if (r != 2)
305 		drm_dbg_kms(&i915->drm,
306 			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
307 	/*
308 	 * Spec says that if the value read is 0 the default granularity should
309 	 * be used instead.
310 	 */
311 	if (r != 2 || w == 0)
312 		w = 4;
313 
314 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
315 	if (r != 1) {
316 		drm_dbg_kms(&i915->drm,
317 			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
318 		y = 4;
319 	}
320 	if (y == 0)
321 		y = 1;
322 
323 exit:
324 	intel_dp->psr.su_w_granularity = w;
325 	intel_dp->psr.su_y_granularity = y;
326 }
327 
328 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
329 {
330 	struct drm_i915_private *dev_priv =
331 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
332 
333 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
334 			 sizeof(intel_dp->psr_dpcd));
335 
336 	if (!intel_dp->psr_dpcd[0])
337 		return;
338 	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
339 		    intel_dp->psr_dpcd[0]);
340 
341 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
342 		drm_dbg_kms(&dev_priv->drm,
343 			    "PSR support not currently available for this panel\n");
344 		return;
345 	}
346 
347 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
348 		drm_dbg_kms(&dev_priv->drm,
349 			    "Panel lacks power state control, PSR cannot be enabled\n");
350 		return;
351 	}
352 
353 	intel_dp->psr.sink_support = true;
354 	intel_dp->psr.sink_sync_latency =
355 		intel_dp_get_sink_sync_latency(intel_dp);
356 
357 	if (DISPLAY_VER(dev_priv) >= 9 &&
358 	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
359 		bool y_req = intel_dp->psr_dpcd[1] &
360 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
361 		bool alpm = intel_dp_get_alpm_status(intel_dp);
362 
		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
		 * are only sure that it is going to be used when required by
		 * the panel. This way the panel is capable of doing selective
		 * updates without an AUX frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
374 		intel_dp->psr.sink_psr2_support = y_req && alpm;
375 		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
376 			    intel_dp->psr.sink_psr2_support ? "" : "not ");
377 
378 		if (intel_dp->psr.sink_psr2_support) {
379 			intel_dp->psr.colorimetry_support =
380 				intel_dp_get_colorimetry_status(intel_dp);
381 			intel_dp_get_su_granularity(intel_dp);
382 		}
383 	}
384 }
385 
386 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
387 {
388 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
389 	u8 dpcd_val = DP_PSR_ENABLE;
390 
391 	/* Enable ALPM at sink for psr2 */
392 	if (intel_dp->psr.psr2_enabled) {
393 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
394 				   DP_ALPM_ENABLE |
395 				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
396 
397 		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
398 	} else {
399 		if (intel_dp->psr.link_standby)
400 			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
401 
402 		if (DISPLAY_VER(dev_priv) >= 8)
403 			dpcd_val |= DP_PSR_CRC_VERIFICATION;
404 	}
405 
406 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
407 		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
408 
409 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
410 
411 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
412 }
413 
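/*
 * TP1 and TP2/TP3 wakeup times for PSR1 come from the VBT (or are forced to
 * the most conservative 2500us values when psr_safest_params is set);
 * display 11+ additionally programs a 0us TP4 time.
 */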
414 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
415 {
416 	struct intel_connector *connector = intel_dp->attached_connector;
417 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
418 	u32 val = 0;
419 
420 	if (DISPLAY_VER(dev_priv) >= 11)
421 		val |= EDP_PSR_TP4_TIME_0US;
422 
423 	if (dev_priv->params.psr_safest_params) {
424 		val |= EDP_PSR_TP1_TIME_2500us;
425 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
426 		goto check_tp3_sel;
427 	}
428 
429 	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
430 		val |= EDP_PSR_TP1_TIME_0us;
431 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
432 		val |= EDP_PSR_TP1_TIME_100us;
433 	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
434 		val |= EDP_PSR_TP1_TIME_500us;
435 	else
436 		val |= EDP_PSR_TP1_TIME_2500us;
437 
438 	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
439 		val |= EDP_PSR_TP2_TP3_TIME_0us;
440 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
441 		val |= EDP_PSR_TP2_TP3_TIME_100us;
442 	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
443 		val |= EDP_PSR_TP2_TP3_TIME_500us;
444 	else
445 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
446 
447 check_tp3_sel:
448 	if (intel_dp_source_supports_tps3(dev_priv) &&
449 	    drm_dp_tps3_supported(intel_dp->dpcd))
450 		val |= EDP_PSR_TP1_TP3_SEL;
451 	else
452 		val |= EDP_PSR_TP1_TP2_SEL;
453 
454 	return val;
455 }
456 
457 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
458 {
459 	struct intel_connector *connector = intel_dp->attached_connector;
460 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
461 	int idle_frames;
462 
463 	/* Let's use 6 as the minimum to cover all known cases including the
464 	 * off-by-one issue that HW has in some cases.
465 	 */
466 	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
467 	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
468 
469 	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
470 		idle_frames = 0xf;
471 
472 	return idle_frames;
473 }
474 
475 static void hsw_activate_psr1(struct intel_dp *intel_dp)
476 {
477 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
478 	u32 max_sleep_time = 0x1f;
479 	u32 val = EDP_PSR_ENABLE;
480 
481 	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
482 
483 	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
484 	if (IS_HASWELL(dev_priv))
485 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
486 
487 	if (intel_dp->psr.link_standby)
488 		val |= EDP_PSR_LINK_STANDBY;
489 
490 	val |= intel_psr1_get_tp_time(intel_dp);
491 
492 	if (DISPLAY_VER(dev_priv) >= 8)
493 		val |= EDP_PSR_CRC_ENABLE;
494 
495 	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
496 		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
497 	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
498 }
499 
500 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
501 {
502 	struct intel_connector *connector = intel_dp->attached_connector;
503 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
504 	u32 val = 0;
505 
506 	if (dev_priv->params.psr_safest_params)
507 		return EDP_PSR2_TP2_TIME_2500us;
508 
509 	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
510 	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
511 		val |= EDP_PSR2_TP2_TIME_50us;
512 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
513 		val |= EDP_PSR2_TP2_TIME_100us;
514 	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
515 		val |= EDP_PSR2_TP2_TIME_500us;
516 	else
517 		val |= EDP_PSR2_TP2_TIME_2500us;
518 
519 	return val;
520 }
521 
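/*
 * PSR2_CTL block count: 8 lines when both the IO and fast wake line counts
 * fit below 9 lines, otherwise 12 lines.
 */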
522 static int psr2_block_count_lines(struct intel_dp *intel_dp)
523 {
524 	return intel_dp->psr.io_wake_lines < 9 &&
525 		intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
526 }
527 
528 static int psr2_block_count(struct intel_dp *intel_dp)
529 {
530 	return psr2_block_count_lines(intel_dp) / 4;
531 }
532 
533 static void hsw_activate_psr2(struct intel_dp *intel_dp)
534 {
535 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
536 	u32 val = EDP_PSR2_ENABLE;
537 
538 	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
539 
540 	if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
541 		val |= EDP_SU_TRACK_ENABLE;
542 
543 	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
544 		val |= EDP_Y_COORDINATE_ENABLE;
545 
546 	val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
547 	val |= intel_psr2_get_tp_time(intel_dp);
548 
549 	if (DISPLAY_VER(dev_priv) >= 12) {
550 		if (psr2_block_count(intel_dp) > 2)
551 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
552 		else
553 			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
554 	}
555 
556 	/* Wa_22012278275:adl-p */
557 	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
558 		static const u8 map[] = {
559 			2, /* 5 lines */
560 			1, /* 6 lines */
561 			0, /* 7 lines */
562 			3, /* 8 lines */
563 			6, /* 9 lines */
564 			5, /* 10 lines */
565 			4, /* 11 lines */
566 			7, /* 12 lines */
567 		};
		/*
		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments below for more information.
		 */
572 		u32 tmp;
573 
574 		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
575 		tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
576 		val |= tmp;
577 
578 		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
579 		tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
580 		val |= tmp;
581 	} else if (DISPLAY_VER(dev_priv) >= 12) {
582 		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
583 		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
584 	} else if (DISPLAY_VER(dev_priv) >= 9) {
585 		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
586 		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
587 	}
588 
589 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
590 		val |= EDP_PSR2_SU_SDP_SCANLINE;
591 
592 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
593 		u32 tmp;
594 
595 		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
596 		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
597 	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
598 		intel_de_write(dev_priv,
599 			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
600 	}
601 
	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
606 	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
607 
608 	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
609 }
610 
611 static bool
612 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
613 {
614 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
615 		return trans == TRANSCODER_A || trans == TRANSCODER_B;
616 	else if (DISPLAY_VER(dev_priv) >= 12)
617 		return trans == TRANSCODER_A;
618 	else
619 		return trans == TRANSCODER_EDP;
620 }
621 
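/*
 * Frame time in microseconds, e.g. ~16667 us for a 60Hz mode;
 * returns 0 if the CRTC is not active.
 */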
622 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
623 {
624 	if (!cstate || !cstate->hw.active)
625 		return 0;
626 
627 	return DIV_ROUND_UP(1000 * 1000,
628 			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
629 }
630 
631 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
632 				     u32 idle_frames)
633 {
634 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
635 
636 	idle_frames <<=  EDP_PSR2_IDLE_FRAME_SHIFT;
637 	intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder),
638 		     EDP_PSR2_IDLE_FRAME_MASK, idle_frames);
639 }
640 
641 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
642 {
643 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
644 
645 	psr2_program_idle_frames(intel_dp, 0);
646 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
647 }
648 
649 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
650 {
651 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
652 
653 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
654 	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
655 }
656 
657 static void tgl_dc3co_disable_work(struct work_struct *work)
658 {
659 	struct intel_dp *intel_dp =
660 		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
661 
662 	mutex_lock(&intel_dp->psr.lock);
663 	/* If delayed work is pending, it is not idle */
664 	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
665 		goto unlock;
666 
667 	tgl_psr2_disable_dc3co(intel_dp);
668 unlock:
669 	mutex_unlock(&intel_dp->psr.lock);
670 }
671 
672 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
673 {
674 	if (!intel_dp->psr.dc3co_exitline)
675 		return;
676 
677 	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit, disallow DC3CO */
679 	tgl_psr2_disable_dc3co(intel_dp);
680 }
681 
682 static bool
683 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
684 			      struct intel_crtc_state *crtc_state)
685 {
686 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
687 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
688 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
689 	enum port port = dig_port->base.port;
690 
691 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
692 		return pipe <= PIPE_B && port <= PORT_B;
693 	else
694 		return pipe == PIPE_A && port == PORT_A;
695 }
696 
697 static void
698 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
699 				  struct intel_crtc_state *crtc_state)
700 {
701 	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
702 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
703 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
704 	u32 exit_scanlines;
705 
706 	/*
707 	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
708 	 * disable DC3CO until the changed dc3co activating/deactivating sequence
709 	 * is applied. B.Specs:49196
710 	 */
711 	return;
712 
	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
717 	if (crtc_state->enable_psr2_sel_fetch)
718 		return;
719 
720 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
721 		return;
722 
723 	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
724 		return;
725 
726 	/* Wa_16011303918:adl-p */
727 	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
728 		return;
729 
730 	/*
731 	 * DC3CO Exit time 200us B.Spec 49196
732 	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
733 	 */
734 	exit_scanlines =
735 		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
736 
737 	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
738 		return;
739 
740 	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
741 }
742 
743 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
744 					      struct intel_crtc_state *crtc_state)
745 {
746 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
747 
748 	if (!dev_priv->params.enable_psr2_sel_fetch &&
749 	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
750 		drm_dbg_kms(&dev_priv->drm,
751 			    "PSR2 sel fetch not enabled, disabled by parameter\n");
752 		return false;
753 	}
754 
755 	if (crtc_state->uapi.async_flip) {
756 		drm_dbg_kms(&dev_priv->drm,
757 			    "PSR2 sel fetch not enabled, async flip enabled\n");
758 		return false;
759 	}
760 
761 	return crtc_state->enable_psr2_sel_fetch = true;
762 }
763 
764 static bool psr2_granularity_check(struct intel_dp *intel_dp,
765 				   struct intel_crtc_state *crtc_state)
766 {
767 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
768 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
769 	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
770 	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
771 	u16 y_granularity = 0;
772 
	/* PSR2 HW only sends full lines so we only need to validate the width */
774 	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
775 		return false;
776 
777 	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
778 		return false;
779 
780 	/* HW tracking is only aligned to 4 lines */
781 	if (!crtc_state->enable_psr2_sel_fetch)
782 		return intel_dp->psr.su_y_granularity == 4;
783 
	/*
	 * adl_p and mtl platforms have 1 line granularity.
	 * For other platforms with SW tracking we can adjust the y coordinates
	 * to match the sink requirement if it is a multiple of 4.
	 */
789 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
790 		y_granularity = intel_dp->psr.su_y_granularity;
791 	else if (intel_dp->psr.su_y_granularity <= 2)
792 		y_granularity = 4;
793 	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
794 		y_granularity = intel_dp->psr.su_y_granularity;
795 
796 	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
797 		return false;
798 
799 	if (crtc_state->dsc.compression_enable &&
800 	    vdsc_cfg->slice_height % y_granularity)
801 		return false;
802 
803 	crtc_state->su_y_granularity = y_granularity;
804 	return true;
805 }
806 
807 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
808 							struct intel_crtc_state *crtc_state)
809 {
810 	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
811 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
812 	u32 hblank_total, hblank_ns, req_ns;
813 
814 	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
815 	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
816 
817 	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
818 	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
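	/*
	 * Illustrative example (values assumed, not from the spec): with 4
	 * lanes at HBR (port_clock 270000 kHz, i.e. a 270 MHz symbol clock)
	 * this works out to (15 + 11) * 1000 / 270 ~= 96 ns.
	 */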
819 
820 	if ((hblank_ns - req_ns) > 100)
821 		return true;
822 
823 	/* Not supported <13 / Wa_22012279113:adl-p */
824 	if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
825 		return false;
826 
827 	crtc_state->req_psr2_sdp_prior_scanline = true;
828 	return true;
829 }
830 
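/*
 * Convert the IO and fast wake times into scanlines for the current mode and
 * check them against the HW limit.
 * Illustrative example (mode values assumed): a 1920x1080@60 mode with a
 * 2200 pixel htotal and a 148.5 MHz pixel clock has a line time of ~14.8 us,
 * so a 45 us fast wake rounds up to 4 lines, which is then raised to the
 * 7 line lower limit below.
 */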
831 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
832 				     struct intel_crtc_state *crtc_state)
833 {
834 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
835 	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
836 	u8 max_wake_lines;
837 
838 	if (DISPLAY_VER(i915) >= 12) {
839 		io_wake_time = 42;
840 		/*
841 		 * According to Bspec it's 42us, but based on testing
842 		 * it is not enough -> use 45 us.
843 		 */
844 		fast_wake_time = 45;
845 		max_wake_lines = 12;
846 	} else {
847 		io_wake_time = 50;
848 		fast_wake_time = 32;
849 		max_wake_lines = 8;
850 	}
851 
852 	io_wake_lines = intel_usecs_to_scanlines(
853 		&crtc_state->uapi.adjusted_mode, io_wake_time);
854 	fast_wake_lines = intel_usecs_to_scanlines(
855 		&crtc_state->uapi.adjusted_mode, fast_wake_time);
856 
857 	if (io_wake_lines > max_wake_lines ||
858 	    fast_wake_lines > max_wake_lines)
859 		return false;
860 
861 	if (i915->params.psr_safest_params)
862 		io_wake_lines = fast_wake_lines = max_wake_lines;
863 
864 	/* According to Bspec lower limit should be set as 7 lines. */
865 	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
866 	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
867 
868 	return true;
869 }
870 
871 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
872 				    struct intel_crtc_state *crtc_state)
873 {
874 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
875 	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
876 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
877 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
878 
879 	if (!intel_dp->psr.sink_psr2_support)
880 		return false;
881 
	/* JSL and EHL only support eDP 1.3 */
883 	if (IS_JSL_EHL(dev_priv)) {
884 		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
885 		return false;
886 	}
887 
888 	/* Wa_16011181250 */
889 	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
890 	    IS_DG2(dev_priv)) {
891 		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
892 		return false;
893 	}
894 
895 	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
896 		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
897 		return false;
898 	}
899 
900 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
901 		drm_dbg_kms(&dev_priv->drm,
902 			    "PSR2 not supported in transcoder %s\n",
903 			    transcoder_name(crtc_state->cpu_transcoder));
904 		return false;
905 	}
906 
907 	if (!psr2_global_enabled(intel_dp)) {
908 		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
909 		return false;
910 	}
911 
912 	/*
913 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
914 	 * resolution requires DSC to be enabled, priority is given to DSC
915 	 * over PSR2.
916 	 */
917 	if (crtc_state->dsc.compression_enable &&
918 	    (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
919 		drm_dbg_kms(&dev_priv->drm,
920 			    "PSR2 cannot be enabled since DSC is enabled\n");
921 		return false;
922 	}
923 
924 	if (crtc_state->crc_enabled) {
925 		drm_dbg_kms(&dev_priv->drm,
926 			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
927 		return false;
928 	}
929 
930 	if (DISPLAY_VER(dev_priv) >= 12) {
931 		psr_max_h = 5120;
932 		psr_max_v = 3200;
933 		max_bpp = 30;
934 	} else if (DISPLAY_VER(dev_priv) >= 10) {
935 		psr_max_h = 4096;
936 		psr_max_v = 2304;
937 		max_bpp = 24;
938 	} else if (DISPLAY_VER(dev_priv) == 9) {
939 		psr_max_h = 3640;
940 		psr_max_v = 2304;
941 		max_bpp = 24;
942 	}
943 
944 	if (crtc_state->pipe_bpp > max_bpp) {
945 		drm_dbg_kms(&dev_priv->drm,
946 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
947 			    crtc_state->pipe_bpp, max_bpp);
948 		return false;
949 	}
950 
951 	/* Wa_16011303918:adl-p */
952 	if (crtc_state->vrr.enable &&
953 	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
954 		drm_dbg_kms(&dev_priv->drm,
955 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
956 		return false;
957 	}
958 
959 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
960 		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
962 		return false;
963 	}
964 
965 	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
966 		drm_dbg_kms(&dev_priv->drm,
967 			    "PSR2 not enabled, Unable to use long enough wake times\n");
968 		return false;
969 	}
970 
971 	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
972 	if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
973 	    crtc_state->hw.adjusted_mode.crtc_vblank_start <
974 	    psr2_block_count_lines(intel_dp)) {
975 		drm_dbg_kms(&dev_priv->drm,
976 			    "PSR2 not enabled, too short vblank time\n");
977 		return false;
978 	}
979 
980 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
981 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
982 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
983 			drm_dbg_kms(&dev_priv->drm,
984 				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
985 			return false;
986 		}
987 	}
988 
989 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
990 		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
991 		goto unsupported;
992 	}
993 
994 	if (!crtc_state->enable_psr2_sel_fetch &&
995 	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
996 		drm_dbg_kms(&dev_priv->drm,
997 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
998 			    crtc_hdisplay, crtc_vdisplay,
999 			    psr_max_h, psr_max_v);
1000 		goto unsupported;
1001 	}
1002 
1003 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1004 	return true;
1005 
1006 unsupported:
1007 	crtc_state->enable_psr2_sel_fetch = false;
1008 	return false;
1009 }
1010 
1011 void intel_psr_compute_config(struct intel_dp *intel_dp,
1012 			      struct intel_crtc_state *crtc_state,
1013 			      struct drm_connector_state *conn_state)
1014 {
1015 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1016 	const struct drm_display_mode *adjusted_mode =
1017 		&crtc_state->hw.adjusted_mode;
1018 	int psr_setup_time;
1019 
	/*
	 * Current PSR panels don't work reliably with VRR enabled, so if
	 * VRR is enabled, do not enable PSR.
	 */
1024 	if (crtc_state->vrr.enable)
1025 		return;
1026 
1027 	if (!CAN_PSR(intel_dp))
1028 		return;
1029 
1030 	if (!psr_global_enabled(intel_dp)) {
1031 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1032 		return;
1033 	}
1034 
1035 	if (intel_dp->psr.sink_not_reliable) {
1036 		drm_dbg_kms(&dev_priv->drm,
1037 			    "PSR sink implementation is not reliable\n");
1038 		return;
1039 	}
1040 
1041 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1042 		drm_dbg_kms(&dev_priv->drm,
1043 			    "PSR condition failed: Interlaced mode enabled\n");
1044 		return;
1045 	}
1046 
1047 	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1048 	if (psr_setup_time < 0) {
1049 		drm_dbg_kms(&dev_priv->drm,
1050 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1051 			    intel_dp->psr_dpcd[1]);
1052 		return;
1053 	}
1054 
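	/*
	 * The PSR setup time must fit within the vertical blanking period,
	 * with one line of margin.
	 */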
1055 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1056 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1057 		drm_dbg_kms(&dev_priv->drm,
1058 			    "PSR condition failed: PSR setup time (%d us) too long\n",
1059 			    psr_setup_time);
1060 		return;
1061 	}
1062 
1063 	crtc_state->has_psr = true;
1064 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1065 
1066 	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1067 	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1068 				     &crtc_state->psr_vsc);
1069 }
1070 
1071 void intel_psr_get_config(struct intel_encoder *encoder,
1072 			  struct intel_crtc_state *pipe_config)
1073 {
1074 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1075 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1076 	struct intel_dp *intel_dp;
1077 	u32 val;
1078 
1079 	if (!dig_port)
1080 		return;
1081 
1082 	intel_dp = &dig_port->dp;
1083 	if (!CAN_PSR(intel_dp))
1084 		return;
1085 
1086 	mutex_lock(&intel_dp->psr.lock);
1087 	if (!intel_dp->psr.enabled)
1088 		goto unlock;
1089 
	/*
	 * Not possible to read EDP_PSR/PSR2_CTL registers as they get
	 * enabled/disabled at runtime because of frontbuffer tracking and
	 * other reasons.
	 */
1094 	pipe_config->has_psr = true;
1095 	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1096 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1097 
1098 	if (!intel_dp->psr.psr2_enabled)
1099 		goto unlock;
1100 
1101 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1102 		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
1103 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1104 			pipe_config->enable_psr2_sel_fetch = true;
1105 	}
1106 
1107 	if (DISPLAY_VER(dev_priv) >= 12) {
1108 		val = intel_de_read(dev_priv, TRANS_EXITLINE(intel_dp->psr.transcoder));
1109 		val &= EXITLINE_MASK;
1110 		pipe_config->dc3co_exitline = val;
1111 	}
1112 unlock:
1113 	mutex_unlock(&intel_dp->psr.lock);
1114 }
1115 
1116 static void intel_psr_activate(struct intel_dp *intel_dp)
1117 {
1118 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1119 	enum transcoder transcoder = intel_dp->psr.transcoder;
1120 
1121 	if (transcoder_has_psr2(dev_priv, transcoder))
1122 		drm_WARN_ON(&dev_priv->drm,
1123 			    intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
1124 
1125 	drm_WARN_ON(&dev_priv->drm,
1126 		    intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
1127 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1128 	lockdep_assert_held(&intel_dp->psr.lock);
1129 
	/* PSR1 and PSR2 are mutually exclusive. */
1131 	if (intel_dp->psr.psr2_enabled)
1132 		hsw_activate_psr2(intel_dp);
1133 	else
1134 		hsw_activate_psr1(intel_dp);
1135 
1136 	intel_dp->psr.active = true;
1137 }
1138 
1139 static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1140 {
1141 	switch (intel_dp->psr.pipe) {
1142 	case PIPE_A:
1143 		return LATENCY_REPORTING_REMOVED_PIPE_A;
1144 	case PIPE_B:
1145 		return LATENCY_REPORTING_REMOVED_PIPE_B;
1146 	case PIPE_C:
1147 		return LATENCY_REPORTING_REMOVED_PIPE_C;
1148 	case PIPE_D:
1149 		return LATENCY_REPORTING_REMOVED_PIPE_D;
1150 	default:
1151 		MISSING_CASE(intel_dp->psr.pipe);
1152 		return 0;
1153 	}
1154 }
1155 
1156 /*
1157  * Wa_16013835468
1158  * Wa_14015648006
1159  */
1160 static void wm_optimization_wa(struct intel_dp *intel_dp,
1161 			       const struct intel_crtc_state *crtc_state)
1162 {
1163 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1164 	bool set_wa_bit = false;
1165 
1166 	/* Wa_14015648006 */
1167 	if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1168 	    IS_DISPLAY_VER(dev_priv, 11, 13))
1169 		set_wa_bit |= crtc_state->wm_level_disabled;
1170 
1171 	/* Wa_16013835468 */
1172 	if (DISPLAY_VER(dev_priv) == 12)
1173 		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1174 			crtc_state->hw.adjusted_mode.crtc_vdisplay;
1175 
1176 	if (set_wa_bit)
1177 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1178 			     0, wa_16013835468_bit_get(intel_dp));
1179 	else
1180 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1181 			     wa_16013835468_bit_get(intel_dp), 0);
1182 }
1183 
1184 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1185 				    const struct intel_crtc_state *crtc_state)
1186 {
1187 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1188 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1189 	u32 mask;
1190 
	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other HW tracking issues now that we
	 * can rely on frontbuffer tracking.
	 */
1197 	mask = EDP_PSR_DEBUG_MASK_MEMUP |
1198 	       EDP_PSR_DEBUG_MASK_HPD |
1199 	       EDP_PSR_DEBUG_MASK_LPSP |
1200 	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1201 
1202 	if (DISPLAY_VER(dev_priv) < 11)
1203 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1204 
1205 	intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
1206 		       mask);
1207 
1208 	psr_irq_control(intel_dp);
1209 
1210 	/*
1211 	 * TODO: if future platforms supports DC3CO in more than one
1212 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1213 	 */
1214 	if (intel_dp->psr.dc3co_exitline)
1215 		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1216 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1217 
1218 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1219 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1220 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1221 			     IGNORE_PSR2_HW_TRACKING : 0);
1222 
1223 	/*
1224 	 * Wa_16013835468
1225 	 * Wa_14015648006
1226 	 */
1227 	wm_optimization_wa(intel_dp, crtc_state);
1228 
1229 	if (intel_dp->psr.psr2_enabled) {
1230 		if (DISPLAY_VER(dev_priv) == 9)
1231 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1232 				     PSR2_VSC_ENABLE_PROG_HEADER |
1233 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1234 
1235 		/*
1236 		 * Wa_16014451276:adlp,mtl[a0,b0]
		 * All supported adlp panels have 1-based X granularity; this
		 * may cause issues if non-supported panels are used.
1239 		 */
1240 		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1241 			intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
1242 				     ADLP_1_BASED_X_GRANULARITY);
1243 		else if (IS_ALDERLAKE_P(dev_priv))
1244 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1245 				     ADLP_1_BASED_X_GRANULARITY);
1246 
1247 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1248 		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1249 			intel_de_rmw(dev_priv,
1250 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1251 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1252 		else if (IS_ALDERLAKE_P(dev_priv))
1253 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1254 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1255 	}
1256 }
1257 
1258 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1259 {
1260 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1261 	u32 val;
1262 
	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the first
	 * time the PSR HW tries to activate, so let's keep PSR disabled to
	 * avoid any rendering problems.
	 */
1271 	if (DISPLAY_VER(dev_priv) >= 12)
1272 		val = intel_de_read(dev_priv,
1273 				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
1274 	else
1275 		val = intel_de_read(dev_priv, EDP_PSR_IIR);
1276 	val &= psr_irq_psr_error_bit_get(intel_dp);
1277 	if (val) {
1278 		intel_dp->psr.sink_not_reliable = true;
1279 		drm_dbg_kms(&dev_priv->drm,
1280 			    "PSR interruption error set, not enabling PSR\n");
1281 		return false;
1282 	}
1283 
1284 	return true;
1285 }
1286 
1287 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1288 				    const struct intel_crtc_state *crtc_state)
1289 {
1290 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1291 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1292 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1293 	struct intel_encoder *encoder = &dig_port->base;
1294 	u32 val;
1295 
1296 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1297 
1298 	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1299 	intel_dp->psr.busy_frontbuffer_bits = 0;
1300 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1301 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1302 	/* DC5/DC6 requires at least 6 idle frames */
1303 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1304 	intel_dp->psr.dc3co_exit_delay = val;
1305 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1306 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1307 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1308 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1309 		crtc_state->req_psr2_sdp_prior_scanline;
1310 
1311 	if (!psr_interrupt_error_check(intel_dp))
1312 		return;
1313 
1314 	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1315 		    intel_dp->psr.psr2_enabled ? "2" : "1");
1316 	intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1317 	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1318 	intel_psr_enable_sink(intel_dp);
1319 	intel_psr_enable_source(intel_dp, crtc_state);
1320 	intel_dp->psr.enabled = true;
1321 	intel_dp->psr.paused = false;
1322 
1323 	intel_psr_activate(intel_dp);
1324 }
1325 
1326 static void intel_psr_exit(struct intel_dp *intel_dp)
1327 {
1328 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1329 	u32 val;
1330 
1331 	if (!intel_dp->psr.active) {
1332 		if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
1333 			val = intel_de_read(dev_priv,
1334 					    EDP_PSR2_CTL(intel_dp->psr.transcoder));
1335 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1336 		}
1337 
1338 		val = intel_de_read(dev_priv,
1339 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
1340 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1341 
1342 		return;
1343 	}
1344 
1345 	if (intel_dp->psr.psr2_enabled) {
1346 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1347 		val = intel_de_read(dev_priv,
1348 				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
1349 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1350 		val &= ~EDP_PSR2_ENABLE;
1351 		intel_de_write(dev_priv,
1352 			       EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
1353 	} else {
1354 		val = intel_de_read(dev_priv,
1355 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
1356 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1357 		val &= ~EDP_PSR_ENABLE;
1358 		intel_de_write(dev_priv,
1359 			       EDP_PSR_CTL(intel_dp->psr.transcoder), val);
1360 	}
1361 	intel_dp->psr.active = false;
1362 }
1363 
1364 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1365 {
1366 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1367 	i915_reg_t psr_status;
1368 	u32 psr_status_mask;
1369 
1370 	if (intel_dp->psr.psr2_enabled) {
1371 		psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1372 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1373 	} else {
1374 		psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1375 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1376 	}
1377 
1378 	/* Wait till PSR is idle */
1379 	if (intel_de_wait_for_clear(dev_priv, psr_status,
1380 				    psr_status_mask, 2000))
1381 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1382 }
1383 
1384 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1385 {
1386 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1387 	enum phy phy = intel_port_to_phy(dev_priv,
1388 					 dp_to_dig_port(intel_dp)->base.port);
1389 
1390 	lockdep_assert_held(&intel_dp->psr.lock);
1391 
1392 	if (!intel_dp->psr.enabled)
1393 		return;
1394 
1395 	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1396 		    intel_dp->psr.psr2_enabled ? "2" : "1");
1397 
1398 	intel_psr_exit(intel_dp);
1399 	intel_psr_wait_exit_locked(intel_dp);
1400 
1401 	/*
1402 	 * Wa_16013835468
1403 	 * Wa_14015648006
1404 	 */
1405 	if (DISPLAY_VER(dev_priv) >= 11)
1406 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1407 			     wa_16013835468_bit_get(intel_dp), 0);
1408 
1409 	if (intel_dp->psr.psr2_enabled) {
1410 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1411 		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1412 			intel_de_rmw(dev_priv,
1413 				     MTL_CLKGATE_DIS_TRANS(intel_dp->psr.transcoder),
1414 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1415 		else if (IS_ALDERLAKE_P(dev_priv))
1416 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1417 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1418 	}
1419 
1420 	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1421 
1422 	/* Disable PSR on Sink */
1423 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1424 
1425 	if (intel_dp->psr.psr2_enabled)
1426 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1427 
1428 	intel_dp->psr.enabled = false;
1429 	intel_dp->psr.psr2_enabled = false;
1430 	intel_dp->psr.psr2_sel_fetch_enabled = false;
1431 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1432 }
1433 
1434 /**
1435  * intel_psr_disable - Disable PSR
1436  * @intel_dp: Intel DP
1437  * @old_crtc_state: old CRTC state
1438  *
 * This function needs to be called before disabling the pipe.
1440  */
1441 void intel_psr_disable(struct intel_dp *intel_dp,
1442 		       const struct intel_crtc_state *old_crtc_state)
1443 {
1444 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1445 
1446 	if (!old_crtc_state->has_psr)
1447 		return;
1448 
1449 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1450 		return;
1451 
1452 	mutex_lock(&intel_dp->psr.lock);
1453 
1454 	intel_psr_disable_locked(intel_dp);
1455 
1456 	mutex_unlock(&intel_dp->psr.lock);
1457 	cancel_work_sync(&intel_dp->psr.work);
1458 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1459 }
1460 
1461 /**
1462  * intel_psr_pause - Pause PSR
1463  * @intel_dp: Intel DP
1464  *
 * This function needs to be called after enabling PSR.
1466  */
1467 void intel_psr_pause(struct intel_dp *intel_dp)
1468 {
1469 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1470 	struct intel_psr *psr = &intel_dp->psr;
1471 
1472 	if (!CAN_PSR(intel_dp))
1473 		return;
1474 
1475 	mutex_lock(&psr->lock);
1476 
1477 	if (!psr->enabled) {
1478 		mutex_unlock(&psr->lock);
1479 		return;
1480 	}
1481 
1482 	/* If we ever hit this, we will need to add refcount to pause/resume */
1483 	drm_WARN_ON(&dev_priv->drm, psr->paused);
1484 
1485 	intel_psr_exit(intel_dp);
1486 	intel_psr_wait_exit_locked(intel_dp);
1487 	psr->paused = true;
1488 
1489 	mutex_unlock(&psr->lock);
1490 
1491 	cancel_work_sync(&psr->work);
1492 	cancel_delayed_work_sync(&psr->dc3co_work);
1493 }
1494 
1495 /**
1496  * intel_psr_resume - Resume PSR
1497  * @intel_dp: Intel DP
1498  *
 * This function needs to be called after pausing PSR.
1500  */
1501 void intel_psr_resume(struct intel_dp *intel_dp)
1502 {
1503 	struct intel_psr *psr = &intel_dp->psr;
1504 
1505 	if (!CAN_PSR(intel_dp))
1506 		return;
1507 
1508 	mutex_lock(&psr->lock);
1509 
1510 	if (!psr->paused)
1511 		goto unlock;
1512 
1513 	psr->paused = false;
1514 	intel_psr_activate(intel_dp);
1515 
1516 unlock:
1517 	mutex_unlock(&psr->lock);
1518 }
1519 
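/*
 * ADL-P and display 14+ have no separate MAN_TRK_CTL enable bit to set,
 * hence 0 is returned for those platforms.
 */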
1520 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1521 {
1522 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1523 		PSR2_MAN_TRK_CTL_ENABLE;
1524 }
1525 
1526 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1527 {
1528 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1529 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1530 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1531 }
1532 
1533 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1534 {
1535 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1536 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1537 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1538 }
1539 
1540 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1541 {
1542 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1543 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1544 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1545 }
1546 
1547 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1548 {
1549 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1550 
1551 	if (intel_dp->psr.psr2_sel_fetch_enabled)
1552 		intel_de_write(dev_priv,
1553 			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
1554 			       man_trk_ctl_enable_bit_get(dev_priv) |
1555 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
1556 			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1557 			       man_trk_ctl_continuos_full_frame(dev_priv));
1558 
	/*
	 * Display WA #0884: skl+
	 * This documented WA for bxt can be safely applied
	 * broadly so we can force HW tracking to exit PSR
	 * instead of disabling and re-enabling.
	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
	 * but it makes more sense to write to the currently
	 * active pipe.
	 *
	 * This workaround does not exist for platforms with display 10 or
	 * newer, but testing proved that it works up to display 13; for
	 * anything newer than that, further testing will be needed.
	 */
1572 	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1573 }
1574 
1575 void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
1576 					    const struct intel_crtc_state *crtc_state)
1577 {
1578 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1579 	enum pipe pipe = plane->pipe;
1580 
1581 	if (!crtc_state->enable_psr2_sel_fetch)
1582 		return;
1583 
1584 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1585 }
1586 
1587 void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
1588 					    const struct intel_crtc_state *crtc_state,
1589 					    const struct intel_plane_state *plane_state)
1590 {
1591 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
1592 	enum pipe pipe = plane->pipe;
1593 
1594 	if (!crtc_state->enable_psr2_sel_fetch)
1595 		return;
1596 
1597 	if (plane->id == PLANE_CURSOR)
1598 		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1599 				  plane_state->ctl);
1600 	else
1601 		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1602 				  PLANE_SEL_FETCH_CTL_ENABLE);
1603 }
1604 
1605 void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
1606 					      const struct intel_crtc_state *crtc_state,
1607 					      const struct intel_plane_state *plane_state,
1608 					      int color_plane)
1609 {
1610 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1611 	enum pipe pipe = plane->pipe;
1612 	const struct drm_rect *clip;
1613 	u32 val;
1614 	int x, y;
1615 
1616 	if (!crtc_state->enable_psr2_sel_fetch)
1617 		return;
1618 
1619 	if (plane->id == PLANE_CURSOR)
1620 		return;
1621 
1622 	clip = &plane_state->psr2_sel_fetch_area;
1623 
1624 	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1625 	val |= plane_state->uapi.dst.x1;
1626 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1627 
1628 	x = plane_state->view.color_plane[color_plane].x;
1629 
1630 	/*
1631 	 * From Bspec: UV surface Start Y Position = half of Y plane Y
1632 	 * start position.
1633 	 */
1634 	if (!color_plane)
1635 		y = plane_state->view.color_plane[color_plane].y + clip->y1;
1636 	else
1637 		y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1638 
1639 	val = y << 16 | x;
1640 
1641 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1642 			  val);
1643 
1644 	/* Sizes are 0 based */
1645 	val = (drm_rect_height(clip) - 1) << 16;
1646 	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1647 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1648 }
1649 
1650 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1651 {
1652 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1653 	struct intel_encoder *encoder;
1654 
1655 	if (!crtc_state->enable_psr2_sel_fetch)
1656 		return;
1657 
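	/*
	 * If continuous full frame updates were enabled by a frontbuffer
	 * invalidation, leave the manual tracking register alone; the CFF
	 * configuration has to stay in place until the corresponding flush.
	 */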
1658 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1659 					     crtc_state->uapi.encoder_mask) {
1660 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1661 
1662 		lockdep_assert_held(&intel_dp->psr.lock);
1663 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1664 			return;
1665 		break;
1666 	}
1667 
1668 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
1669 		       crtc_state->psr2_man_track_ctl);
1670 }
1671 
1672 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1673 				  struct drm_rect *clip, bool full_update)
1674 {
1675 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1676 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1677 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1678 
1679 	/* SF partial frame enable has to be set even on full update */
1680 	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1681 
1682 	if (full_update) {
1683 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1684 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
1685 		goto exit;
1686 	}
1687 
1688 	if (clip->y1 == -1)
1689 		goto exit;
1690 
1691 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1692 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1693 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1694 	} else {
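		/* Here the SU region start/end is programmed in blocks of 4 lines */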
1695 		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1696 
1697 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1698 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1699 	}
1700 exit:
1701 	crtc_state->psr2_man_track_ctl = val;
1702 }
1703 
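/*
 * Grow overlap_damage_area to also cover damage_area clipped to pipe_src;
 * a y1 of -1 marks an overlap area that is still unset.
 */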
1704 static void clip_area_update(struct drm_rect *overlap_damage_area,
1705 			     struct drm_rect *damage_area,
1706 			     struct drm_rect *pipe_src)
1707 {
1708 	if (!drm_rect_intersect(damage_area, pipe_src))
1709 		return;
1710 
1711 	if (overlap_damage_area->y1 == -1) {
1712 		overlap_damage_area->y1 = damage_area->y1;
1713 		overlap_damage_area->y2 = damage_area->y2;
1714 		return;
1715 	}
1716 
1717 	if (damage_area->y1 < overlap_damage_area->y1)
1718 		overlap_damage_area->y1 = damage_area->y1;
1719 
1720 	if (damage_area->y2 > overlap_damage_area->y2)
1721 		overlap_damage_area->y2 = damage_area->y2;
1722 }
1723 
1724 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1725 						struct drm_rect *pipe_clip)
1726 {
1727 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1728 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1729 	u16 y_alignment;
1730 
1731 	/* ADLP+ aligns the SU region to the VDSC slice height when DSC is enabled */
1732 	if (crtc_state->dsc.compression_enable &&
1733 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
1734 		y_alignment = vdsc_cfg->slice_height;
1735 	else
1736 		y_alignment = crtc_state->su_y_granularity;
1737 
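	/* Round y1 down and y2 up to the chosen granularity */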
1738 	pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1739 	if (pipe_clip->y2 % y_alignment)
1740 		pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
1741 }
1742 
1743 /*
1744  * TODO: It is not clear how to handle planes with a negative position;
1745  * planes are also not updated if they have a negative X
1746  * position, so for now do a full update in those cases.
1747  *
1748  * Plane scaling and rotation are not supported by selective fetch and both
1749  * properties can change without a modeset, so they need to be checked at
1750  * every atomic commit.
1751  */
1752 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1753 {
1754 	if (plane_state->uapi.dst.y1 < 0 ||
1755 	    plane_state->uapi.dst.x1 < 0 ||
1756 	    plane_state->scaler_id >= 0 ||
1757 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1758 		return false;
1759 
1760 	return true;
1761 }
1762 
1763 /*
1764  * Check for pipe properties that are not supported by selective fetch.
1765  *
1766  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
1767  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
1768  * enabled and going to the full update path.
1769  */
1770 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1771 {
1772 	if (crtc_state->scaler_state.scaler_id >= 0)
1773 		return false;
1774 
1775 	return true;
1776 }
1777 
1778 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1779 				struct intel_crtc *crtc)
1780 {
1781 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1782 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1783 	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1784 	struct intel_plane_state *new_plane_state, *old_plane_state;
1785 	struct intel_plane *plane;
1786 	bool full_update = false;
1787 	int i, ret;
1788 
1789 	if (!crtc_state->enable_psr2_sel_fetch)
1790 		return 0;
1791 
1792 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
1793 		full_update = true;
1794 		goto skip_sel_fetch_set_loop;
1795 	}
1796 
1797 	/*
1798 	 * Calculate the minimal selective fetch area of each plane and
1799 	 * calculate the pipe damaged area.
1800 	 * In the next loop the plane selective fetch area will actually be set
1801 	 * using the whole pipe damaged area.
1802 	 */
1803 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1804 					     new_plane_state, i) {
1805 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
1806 						      .x2 = INT_MAX };
1807 
1808 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
1809 			continue;
1810 
1811 		if (!new_plane_state->uapi.visible &&
1812 		    !old_plane_state->uapi.visible)
1813 			continue;
1814 
1815 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
1816 			full_update = true;
1817 			break;
1818 		}
1819 
1820 		/*
1821 		 * If visibility changed or the plane moved, mark the whole plane
1822 		 * area as damaged, as it needs a complete redraw in both the old
1823 		 * and new positions.
1824 		 */
1825 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
1826 		    !drm_rect_equals(&new_plane_state->uapi.dst,
1827 				     &old_plane_state->uapi.dst)) {
1828 			if (old_plane_state->uapi.visible) {
1829 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
1830 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
1831 				clip_area_update(&pipe_clip, &damaged_area,
1832 						 &crtc_state->pipe_src);
1833 			}
1834 
1835 			if (new_plane_state->uapi.visible) {
1836 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
1837 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
1838 				clip_area_update(&pipe_clip, &damaged_area,
1839 						 &crtc_state->pipe_src);
1840 			}
1841 			continue;
1842 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
1843 			/* If alpha changed mark the whole plane area as damaged */
1844 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
1845 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
1846 			clip_area_update(&pipe_clip, &damaged_area,
1847 					 &crtc_state->pipe_src);
1848 			continue;
1849 		}
1850 
1851 		src = drm_plane_state_src(&new_plane_state->uapi);
1852 		drm_rect_fp_to_int(&src, &src);
1853 
1854 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
1855 						     &new_plane_state->uapi, &damaged_area))
1856 			continue;
1857 
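		/*
		 * The merged damage is in plane source (framebuffer)
		 * coordinates; translate it to pipe coordinates.
		 */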
1858 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
1859 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
1860 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
1861 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
1862 
1863 		clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
1864 	}
1865 
1866 	/*
1867 	 * TODO: For now we are just using full update in case
1868 	 * selective fetch area calculation fails. To optimize this we
1869 	 * should identify cases where this happens and fix the area
1870 	 * calculation for those.
1871 	 */
1872 	if (pipe_clip.y1 == -1) {
1873 		drm_info_once(&dev_priv->drm,
1874 			      "Selective fetch area calculation failed in pipe %c\n",
1875 			      pipe_name(crtc->pipe));
1876 		full_update = true;
1877 	}
1878 
1879 	if (full_update)
1880 		goto skip_sel_fetch_set_loop;
1881 
1882 	/* Wa_14014971492 */
1883 	if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1884 	     IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
1885 	    crtc_state->splitter.enable)
1886 		pipe_clip.y1 = 0;
1887 
1888 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
1889 	if (ret)
1890 		return ret;
1891 
1892 	intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
1893 
1894 	/*
1895 	 * Now that we have the pipe damaged area, check if it intersects with
1896 	 * each plane; if it does, set the plane selective fetch area.
1897 	 */
1898 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1899 					     new_plane_state, i) {
1900 		struct drm_rect *sel_fetch_area, inter;
1901 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
1902 
1903 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
1904 		    !new_plane_state->uapi.visible)
1905 			continue;
1906 
1907 		inter = pipe_clip;
1908 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
1909 			continue;
1910 
1911 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
1912 			full_update = true;
1913 			break;
1914 		}
1915 
1916 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
1917 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
1918 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
1919 		crtc_state->update_planes |= BIT(plane->id);
1920 
1921 		/*
1922 		 * Sel_fetch_area is calculated for UV plane. Use
1923 		 * same area for Y plane as well.
1924 		 */
1925 		if (linked) {
1926 			struct intel_plane_state *linked_new_plane_state;
1927 			struct drm_rect *linked_sel_fetch_area;
1928 
1929 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
1930 			if (IS_ERR(linked_new_plane_state))
1931 				return PTR_ERR(linked_new_plane_state);
1932 
1933 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
1934 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
1935 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
1936 			crtc_state->update_planes |= BIT(linked->id);
1937 		}
1938 	}
1939 
1940 skip_sel_fetch_set_loop:
1941 	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
1942 	return 0;
1943 }
1944 
1945 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
1946 				struct intel_crtc *crtc)
1947 {
1948 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1949 	const struct intel_crtc_state *old_crtc_state =
1950 		intel_atomic_get_old_crtc_state(state, crtc);
1951 	const struct intel_crtc_state *new_crtc_state =
1952 		intel_atomic_get_new_crtc_state(state, crtc);
1953 	struct intel_encoder *encoder;
1954 
1955 	if (!HAS_PSR(i915))
1956 		return;
1957 
1958 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
1959 					     old_crtc_state->uapi.encoder_mask) {
1960 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1961 		struct intel_psr *psr = &intel_dp->psr;
1962 		bool needs_to_disable = false;
1963 
1964 		mutex_lock(&psr->lock);
1965 
1966 		/*
1967 		 * Reasons to disable:
1968 		 * - PSR disabled in new state
1969 		 * - All planes will go inactive
1970 		 * - Changing between PSR versions
1971 		 * - Display WA #1136: skl, bxt
1972 		 */
1973 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
1974 		needs_to_disable |= !new_crtc_state->has_psr;
1975 		needs_to_disable |= !new_crtc_state->active_planes;
1976 		needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
1977 		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
1978 			new_crtc_state->wm_level_disabled;
1979 
1980 		if (psr->enabled && needs_to_disable)
1981 			intel_psr_disable_locked(intel_dp);
1982 		else if (psr->enabled && new_crtc_state->wm_level_disabled)
1983 			/* Wa_14015648006 */
1984 			wm_optimization_wa(intel_dp, new_crtc_state);
1985 
1986 		mutex_unlock(&psr->lock);
1987 	}
1988 }
1989 
1990 static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
1991 					 const struct intel_crtc_state *crtc_state)
1992 {
1993 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1994 	struct intel_encoder *encoder;
1995 
1996 	if (!crtc_state->has_psr)
1997 		return;
1998 
1999 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2000 					     crtc_state->uapi.encoder_mask) {
2001 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2002 		struct intel_psr *psr = &intel_dp->psr;
2003 		bool keep_disabled = false;
2004 
2005 		mutex_lock(&psr->lock);
2006 
2007 		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2008 
2009 		keep_disabled |= psr->sink_not_reliable;
2010 		keep_disabled |= !crtc_state->active_planes;
2011 
2012 		/* Display WA #1136: skl, bxt */
2013 		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2014 			crtc_state->wm_level_disabled;
2015 
2016 		if (!psr->enabled && !keep_disabled)
2017 			intel_psr_enable_locked(intel_dp, crtc_state);
2018 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2019 			/* Wa_14015648006 */
2020 			wm_optimization_wa(intel_dp, crtc_state);
2021 
2022 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2023 		if (crtc_state->crc_enabled && psr->enabled)
2024 			psr_force_hw_tracking_exit(intel_dp);
2025 
2026 		mutex_unlock(&psr->lock);
2027 	}
2028 }
2029 
2030 void intel_psr_post_plane_update(const struct intel_atomic_state *state)
2031 {
2032 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2033 	struct intel_crtc_state *crtc_state;
2034 	struct intel_crtc *crtc;
2035 	int i;
2036 
2037 	if (!HAS_PSR(dev_priv))
2038 		return;
2039 
2040 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
2041 		_intel_psr_post_plane_update(state, crtc_state);
2042 }
2043 
2044 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2045 {
2046 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2047 
2048 	/*
2049 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2050 	 * As all higher states have bit 4 of the PSR2 state set we can just wait for
2051 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2052 	 */
2053 	return intel_de_wait_for_clear(dev_priv,
2054 				       EDP_PSR2_STATUS(intel_dp->psr.transcoder),
2055 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2056 }
2057 
2058 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2059 {
2060 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2061 
2062 	/*
2063 	 * From bspec: Panel Self Refresh (BDW+)
2064 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2065 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2066 	 * defensive enough to cover everything.
2067 	 */
2068 	return intel_de_wait_for_clear(dev_priv,
2069 				       EDP_PSR_STATUS(intel_dp->psr.transcoder),
2070 				       EDP_PSR_STATUS_STATE_MASK, 50);
2071 }
2072 
2073 /**
2074  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2075  * @new_crtc_state: new CRTC state
2076  *
2077  * This function is expected to be called from pipe_update_start() where it is
2078  * not expected to race with PSR enable or disable.
2079  */
2080 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2081 {
2082 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2083 	struct intel_encoder *encoder;
2084 
2085 	if (!new_crtc_state->has_psr)
2086 		return;
2087 
2088 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2089 					     new_crtc_state->uapi.encoder_mask) {
2090 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2091 		int ret;
2092 
2093 		lockdep_assert_held(&intel_dp->psr.lock);
2094 
2095 		if (!intel_dp->psr.enabled)
2096 			continue;
2097 
2098 		if (intel_dp->psr.psr2_enabled)
2099 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2100 		else
2101 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2102 
2103 		if (ret)
2104 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2105 	}
2106 }
2107 
2108 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2109 {
2110 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2111 	i915_reg_t reg;
2112 	u32 mask;
2113 	int err;
2114 
2115 	if (!intel_dp->psr.enabled)
2116 		return false;
2117 
2118 	if (intel_dp->psr.psr2_enabled) {
2119 		reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
2120 		mask = EDP_PSR2_STATUS_STATE_MASK;
2121 	} else {
2122 		reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
2123 		mask = EDP_PSR_STATUS_STATE_MASK;
2124 	}
2125 
2126 	mutex_unlock(&intel_dp->psr.lock);
2127 
2128 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2129 	if (err)
2130 		drm_err(&dev_priv->drm,
2131 			"Timed out waiting for PSR Idle for re-enable\n");
2132 
2133 	/* After the unlocked wait, verify that PSR is still wanted! */
2134 	mutex_lock(&intel_dp->psr.lock);
2135 	return err == 0 && intel_dp->psr.enabled;
2136 }
2137 
2138 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2139 {
2140 	struct drm_connector_list_iter conn_iter;
2141 	struct drm_modeset_acquire_ctx ctx;
2142 	struct drm_atomic_state *state;
2143 	struct drm_connector *conn;
2144 	int err = 0;
2145 
2146 	state = drm_atomic_state_alloc(&dev_priv->drm);
2147 	if (!state)
2148 		return -ENOMEM;
2149 
2150 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2151 	state->acquire_ctx = &ctx;
2152 
2153 retry:
2154 
2155 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2156 	drm_for_each_connector_iter(conn, &conn_iter) {
2157 		struct drm_connector_state *conn_state;
2158 		struct drm_crtc_state *crtc_state;
2159 
2160 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2161 			continue;
2162 
2163 		conn_state = drm_atomic_get_connector_state(state, conn);
2164 		if (IS_ERR(conn_state)) {
2165 			err = PTR_ERR(conn_state);
2166 			break;
2167 		}
2168 
2169 		if (!conn_state->crtc)
2170 			continue;
2171 
2172 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2173 		if (IS_ERR(crtc_state)) {
2174 			err = PTR_ERR(crtc_state);
2175 			break;
2176 		}
2177 
2178 		/* Mark mode as changed to trigger a pipe->update() */
2179 		crtc_state->mode_changed = true;
2180 	}
2181 	drm_connector_list_iter_end(&conn_iter);
2182 
2183 	if (err == 0)
2184 		err = drm_atomic_commit(state);
2185 
2186 	if (err == -EDEADLK) {
2187 		drm_atomic_state_clear(state);
2188 		err = drm_modeset_backoff(&ctx);
2189 		if (!err)
2190 			goto retry;
2191 	}
2192 
2193 	drm_modeset_drop_locks(&ctx);
2194 	drm_modeset_acquire_fini(&ctx);
2195 	drm_atomic_state_put(state);
2196 
2197 	return err;
2198 }
2199 
2200 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2201 {
2202 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2203 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2204 	u32 old_mode;
2205 	int ret;
2206 
2207 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2208 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2209 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2210 		return -EINVAL;
2211 	}
2212 
2213 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2214 	if (ret)
2215 		return ret;
2216 
2217 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2218 	intel_dp->psr.debug = val;
2219 
2220 	/*
2221 	 * Do it right away if it's already enabled, otherwise it will be done
2222 	 * when enabling the source.
2223 	 */
2224 	if (intel_dp->psr.enabled)
2225 		psr_irq_control(intel_dp);
2226 
2227 	mutex_unlock(&intel_dp->psr.lock);
2228 
2229 	if (old_mode != mode)
2230 		ret = intel_psr_fastset_force(dev_priv);
2231 
2232 	return ret;
2233 }
2234 
2235 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2236 {
2237 	struct intel_psr *psr = &intel_dp->psr;
2238 
2239 	intel_psr_disable_locked(intel_dp);
2240 	psr->sink_not_reliable = true;
2241 	/* let's make sure that the sink is awake */
2242 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2243 }
2244 
2245 static void intel_psr_work(struct work_struct *work)
2246 {
2247 	struct intel_dp *intel_dp =
2248 		container_of(work, typeof(*intel_dp), psr.work);
2249 
2250 	mutex_lock(&intel_dp->psr.lock);
2251 
2252 	if (!intel_dp->psr.enabled)
2253 		goto unlock;
2254 
2255 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2256 		intel_psr_handle_irq(intel_dp);
2257 
2258 	/*
2259 	 * We have to make sure PSR is ready for re-enable,
2260 	 * otherwise it stays disabled until the next full enable/disable cycle.
2261 	 * PSR might take some time to get fully disabled
2262 	 * and be ready for re-enable.
2263 	 */
2264 	if (!__psr_wait_for_idle_locked(intel_dp))
2265 		goto unlock;
2266 
2267 	/*
2268 	 * The delayed work can race with an invalidate hence we need to
2269 	 * recheck. Since psr_flush first clears this and then reschedules we
2270 	 * won't ever miss a flush when bailing out here.
2271 	 */
2272 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2273 		goto unlock;
2274 
2275 	intel_psr_activate(intel_dp);
2276 unlock:
2277 	mutex_unlock(&intel_dp->psr.lock);
2278 }
2279 
2280 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2281 {
2282 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2283 
2284 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2285 		u32 val;
2286 
2287 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2288 			/* Send one update, otherwise lag is observed on screen */
2289 			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2290 			return;
2291 		}
2292 
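		/*
		 * Switch the manual tracking register to continuous full frame
		 * updates while the frontbuffer is dirty; the CURSURFLIVE write
		 * kicks off an update with the new configuration.
		 */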
2293 		val = man_trk_ctl_enable_bit_get(dev_priv) |
2294 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
2295 		      man_trk_ctl_continuos_full_frame(dev_priv);
2296 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val);
2297 		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2298 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2299 	} else {
2300 		intel_psr_exit(intel_dp);
2301 	}
2302 }
2303 
2304 /**
2305  * intel_psr_invalidate - Invalidate PSR
2306  * @dev_priv: i915 device
2307  * @frontbuffer_bits: frontbuffer plane tracking bits
2308  * @origin: which operation caused the invalidate
2309  *
2310  * Since the hardware frontbuffer tracking has gaps we need to integrate
2311  * with the software frontbuffer tracking. This function gets called every
2312  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2313  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2314  *
2315  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2316  */
2317 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2318 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
2319 {
2320 	struct intel_encoder *encoder;
2321 
2322 	if (origin == ORIGIN_FLIP)
2323 		return;
2324 
2325 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2326 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2327 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2328 
2329 		mutex_lock(&intel_dp->psr.lock);
2330 		if (!intel_dp->psr.enabled) {
2331 			mutex_unlock(&intel_dp->psr.lock);
2332 			continue;
2333 		}
2334 
2335 		pipe_frontbuffer_bits &=
2336 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2337 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2338 
2339 		if (pipe_frontbuffer_bits)
2340 			_psr_invalidate_handle(intel_dp);
2341 
2342 		mutex_unlock(&intel_dp->psr.lock);
2343 	}
2344 }
2345 /*
2346  * Once we completely rely on PSR2 S/W tracking in the future,
2347  * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
2348  * events as well, therefore tgl_dc3co_flush_locked() will need to be
2349  * changed accordingly.
2350  */
2351 static void
2352 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2353 		       enum fb_op_origin origin)
2354 {
2355 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2356 	    !intel_dp->psr.active)
2357 		return;
2358 
2359 	/*
2360 	 * Every frontbuffer flush/flip event pushes back the delayed work;
2361 	 * when the delayed work finally runs, it means the display has been idle.
2362 	 */
2363 	if (!(frontbuffer_bits &
2364 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2365 		return;
2366 
2367 	tgl_psr2_enable_dc3co(intel_dp);
2368 	mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
2369 			 intel_dp->psr.dc3co_exit_delay);
2370 }
2371 
2372 static void _psr_flush_handle(struct intel_dp *intel_dp)
2373 {
2374 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2375 
2376 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2377 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2378 			/* can we turn CFF off? */
2379 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2380 				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2381 					man_trk_ctl_partial_frame_bit_get(dev_priv) |
2382 					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2383 					man_trk_ctl_continuos_full_frame(dev_priv);
2384 
2385 				/*
2386 				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2387 				 * updates. Still keep the CFF bit enabled as we don't have a proper
2388 				 * SU configuration in case an update is sent for any reason after
2389 				 * the SFF bit gets cleared by the HW on the next vblank.
2390 				 */
2391 				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
2392 					       val);
2393 				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2394 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2395 			}
2396 		} else {
2397 			/*
2398 			 * continuous full frame is disabled, only a single full
2399 			 * frame is required
2400 			 */
2401 			psr_force_hw_tracking_exit(intel_dp);
2402 		}
2403 	} else {
2404 		psr_force_hw_tracking_exit(intel_dp);
2405 
2406 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2407 			schedule_work(&intel_dp->psr.work);
2408 	}
2409 }
2410 
2411 /**
2412  * intel_psr_flush - Flush PSR
2413  * @dev_priv: i915 device
2414  * @frontbuffer_bits: frontbuffer plane tracking bits
2415  * @origin: which operation caused the flush
2416  *
2417  * Since the hardware frontbuffer tracking has gaps we need to integrate
2418  * with the software frontbuffer tracking. This function gets called every
2419  * time frontbuffer rendering has completed and flushed out to memory. PSR
2420  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2421  *
2422  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2423  */
2424 void intel_psr_flush(struct drm_i915_private *dev_priv,
2425 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
2426 {
2427 	struct intel_encoder *encoder;
2428 
2429 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2430 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2431 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2432 
2433 		mutex_lock(&intel_dp->psr.lock);
2434 		if (!intel_dp->psr.enabled) {
2435 			mutex_unlock(&intel_dp->psr.lock);
2436 			continue;
2437 		}
2438 
2439 		pipe_frontbuffer_bits &=
2440 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2441 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2442 
2443 		/*
2444 		 * If the PSR is paused by an explicit intel_psr_paused() call,
2445 		 * we have to ensure that the PSR is not activated until
2446 		 * intel_psr_resume() is called.
2447 		 */
2448 		if (intel_dp->psr.paused)
2449 			goto unlock;
2450 
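		/*
		 * Flips and, without selective fetch, cursor updates do not
		 * need a software-forced PSR exit; only refresh the DC3CO
		 * exit deadline.
		 */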
2451 		if (origin == ORIGIN_FLIP ||
2452 		    (origin == ORIGIN_CURSOR_UPDATE &&
2453 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
2454 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2455 			goto unlock;
2456 		}
2457 
2458 		if (pipe_frontbuffer_bits == 0)
2459 			goto unlock;
2460 
2461 		/* By definition flush = invalidate + flush */
2462 		_psr_flush_handle(intel_dp);
2463 unlock:
2464 		mutex_unlock(&intel_dp->psr.lock);
2465 	}
2466 }
2467 
2468 /**
2469  * intel_psr_init - Init basic PSR work and mutex.
2470  * @intel_dp: Intel DP
2471  *
2472  * This function is called after connector initialization (which handles
2473  * reading the connector capabilities) and it initializes basic PSR state
2474  * for each DP encoder.
2475  */
2476 void intel_psr_init(struct intel_dp *intel_dp)
2477 {
2478 	struct intel_connector *connector = intel_dp->attached_connector;
2479 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2480 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2481 
2482 	if (!HAS_PSR(dev_priv))
2483 		return;
2484 
2485 	/*
2486 	 * HSW spec explicitly says PSR is tied to port A.
2487 	 * BDW+ platforms have an instance of PSR registers per transcoder, but
2488 	 * BDW, GEN9 and GEN11 are not validated by the HW team on transcoders
2489 	 * other than the eDP one.
2490 	 * For now only one instance of PSR is supported for BDW, GEN9 and GEN11,
2491 	 * so let's keep it hardcoded to PORT_A for those.
2492 	 * GEN12, however, supports an instance of PSR registers per transcoder.
2493 	 */
2494 	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2495 		drm_dbg_kms(&dev_priv->drm,
2496 			    "PSR condition failed: Port not supported\n");
2497 		return;
2498 	}
2499 
2500 	intel_dp->psr.source_support = true;
2501 
2502 	/* Set link_standby vs. link_off defaults */
2503 	if (DISPLAY_VER(dev_priv) < 12)
2504 		/* For platforms up to TGL, respect the VBT setting again */
2505 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2506 
2507 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2508 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2509 	mutex_init(&intel_dp->psr.lock);
2510 }
2511 
2512 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2513 					   u8 *status, u8 *error_status)
2514 {
2515 	struct drm_dp_aux *aux = &intel_dp->aux;
2516 	int ret;
2517 
2518 	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2519 	if (ret != 1)
2520 		return ret;
2521 
2522 	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2523 	if (ret != 1)
2524 		return ret;
2525 
2526 	*status = *status & DP_PSR_SINK_STATE_MASK;
2527 
2528 	return 0;
2529 }
2530 
2531 static void psr_alpm_check(struct intel_dp *intel_dp)
2532 {
2533 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2534 	struct drm_dp_aux *aux = &intel_dp->aux;
2535 	struct intel_psr *psr = &intel_dp->psr;
2536 	u8 val;
2537 	int r;
2538 
2539 	if (!psr->psr2_enabled)
2540 		return;
2541 
2542 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2543 	if (r != 1) {
2544 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2545 		return;
2546 	}
2547 
2548 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2549 		intel_psr_disable_locked(intel_dp);
2550 		psr->sink_not_reliable = true;
2551 		drm_dbg_kms(&dev_priv->drm,
2552 			    "ALPM lock timeout error, disabling PSR\n");
2553 
2554 		/* Clearing error */
2555 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2556 	}
2557 }
2558 
2559 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2560 {
2561 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2562 	struct intel_psr *psr = &intel_dp->psr;
2563 	u8 val;
2564 	int r;
2565 
2566 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2567 	if (r != 1) {
2568 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2569 		return;
2570 	}
2571 
2572 	if (val & DP_PSR_CAPS_CHANGE) {
2573 		intel_psr_disable_locked(intel_dp);
2574 		psr->sink_not_reliable = true;
2575 		drm_dbg_kms(&dev_priv->drm,
2576 			    "Sink PSR capability changed, disabling PSR\n");
2577 
2578 		/* Clearing it */
2579 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2580 	}
2581 }
2582 
2583 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2584 {
2585 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2586 	struct intel_psr *psr = &intel_dp->psr;
2587 	u8 status, error_status;
2588 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2589 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2590 			  DP_PSR_LINK_CRC_ERROR;
2591 
2592 	if (!CAN_PSR(intel_dp))
2593 		return;
2594 
2595 	mutex_lock(&psr->lock);
2596 
2597 	if (!psr->enabled)
2598 		goto exit;
2599 
2600 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2601 		drm_err(&dev_priv->drm,
2602 			"Error reading PSR status or error status\n");
2603 		goto exit;
2604 	}
2605 
2606 	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2607 		intel_psr_disable_locked(intel_dp);
2608 		psr->sink_not_reliable = true;
2609 	}
2610 
2611 	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2612 		drm_dbg_kms(&dev_priv->drm,
2613 			    "PSR sink internal error, disabling PSR\n");
2614 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2615 		drm_dbg_kms(&dev_priv->drm,
2616 			    "PSR RFB storage error, disabling PSR\n");
2617 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2618 		drm_dbg_kms(&dev_priv->drm,
2619 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
2620 	if (error_status & DP_PSR_LINK_CRC_ERROR)
2621 		drm_dbg_kms(&dev_priv->drm,
2622 			    "PSR Link CRC error, disabling PSR\n");
2623 
2624 	if (error_status & ~errors)
2625 		drm_err(&dev_priv->drm,
2626 			"PSR_ERROR_STATUS unhandled errors %x\n",
2627 			error_status & ~errors);
2628 	/* clear status register */
2629 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2630 
2631 	psr_alpm_check(intel_dp);
2632 	psr_capability_changed_check(intel_dp);
2633 
2634 exit:
2635 	mutex_unlock(&psr->lock);
2636 }
2637 
2638 bool intel_psr_enabled(struct intel_dp *intel_dp)
2639 {
2640 	bool ret;
2641 
2642 	if (!CAN_PSR(intel_dp))
2643 		return false;
2644 
2645 	mutex_lock(&intel_dp->psr.lock);
2646 	ret = intel_dp->psr.enabled;
2647 	mutex_unlock(&intel_dp->psr.lock);
2648 
2649 	return ret;
2650 }
2651 
2652 /**
2653  * intel_psr_lock - grab PSR lock
2654  * @crtc_state: the crtc state
2655  *
2656  * This is initially meant to be used around a CRTC update, when
2657  * vblank-sensitive registers are updated and we need to grab the lock
2658  * before that to avoid vblank evasion.
2659  */
2660 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2661 {
2662 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2663 	struct intel_encoder *encoder;
2664 
2665 	if (!crtc_state->has_psr)
2666 		return;
2667 
2668 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2669 					     crtc_state->uapi.encoder_mask) {
2670 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2671 
2672 		mutex_lock(&intel_dp->psr.lock);
2673 		break;
2674 	}
2675 }
2676 
2677 /**
2678  * intel_psr_unlock - release PSR lock
2679  * @crtc_state: the crtc state
2680  *
2681  * Release the PSR lock that was held during pipe update.
2682  */
2683 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2684 {
2685 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2686 	struct intel_encoder *encoder;
2687 
2688 	if (!crtc_state->has_psr)
2689 		return;
2690 
2691 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2692 					     crtc_state->uapi.encoder_mask) {
2693 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2694 
2695 		mutex_unlock(&intel_dp->psr.lock);
2696 		break;
2697 	}
2698 }
2699 
2700 static void
2701 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
2702 {
2703 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2704 	const char *status = "unknown";
2705 	u32 val, status_val;
2706 
2707 	if (intel_dp->psr.psr2_enabled) {
2708 		static const char * const live_status[] = {
2709 			"IDLE",
2710 			"CAPTURE",
2711 			"CAPTURE_FS",
2712 			"SLEEP",
2713 			"BUFON_FW",
2714 			"ML_UP",
2715 			"SU_STANDBY",
2716 			"FAST_SLEEP",
2717 			"DEEP_SLEEP",
2718 			"BUF_ON",
2719 			"TG_ON"
2720 		};
2721 		val = intel_de_read(dev_priv,
2722 				    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
2723 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
2724 		if (status_val < ARRAY_SIZE(live_status))
2725 			status = live_status[status_val];
2726 	} else {
2727 		static const char * const live_status[] = {
2728 			"IDLE",
2729 			"SRDONACK",
2730 			"SRDENT",
2731 			"BUFOFF",
2732 			"BUFON",
2733 			"AUXACK",
2734 			"SRDOFFACK",
2735 			"SRDENT_ON",
2736 		};
2737 		val = intel_de_read(dev_priv,
2738 				    EDP_PSR_STATUS(intel_dp->psr.transcoder));
2739 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2740 			      EDP_PSR_STATUS_STATE_SHIFT;
2741 		if (status_val < ARRAY_SIZE(live_status))
2742 			status = live_status[status_val];
2743 	}
2744 
2745 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2746 }
2747 
2748 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
2749 {
2750 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2751 	struct intel_psr *psr = &intel_dp->psr;
2752 	intel_wakeref_t wakeref;
2753 	const char *status;
2754 	bool enabled;
2755 	u32 val;
2756 
2757 	seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
2758 	if (psr->sink_support)
2759 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
2760 	seq_puts(m, "\n");
2761 
2762 	if (!psr->sink_support)
2763 		return 0;
2764 
2765 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2766 	mutex_lock(&psr->lock);
2767 
2768 	if (psr->enabled)
2769 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2770 	else
2771 		status = "disabled";
2772 	seq_printf(m, "PSR mode: %s\n", status);
2773 
2774 	if (!psr->enabled) {
2775 		seq_printf(m, "PSR sink not reliable: %s\n",
2776 			   str_yes_no(psr->sink_not_reliable));
2777 
2778 		goto unlock;
2779 	}
2780 
2781 	if (psr->psr2_enabled) {
2782 		val = intel_de_read(dev_priv,
2783 				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
2784 		enabled = val & EDP_PSR2_ENABLE;
2785 	} else {
2786 		val = intel_de_read(dev_priv,
2787 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
2788 		enabled = val & EDP_PSR_ENABLE;
2789 	}
2790 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2791 		   str_enabled_disabled(enabled), val);
2792 	psr_source_status(intel_dp, m);
2793 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2794 		   psr->busy_frontbuffer_bits);
2795 
2796 	/*
2797 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
2798 	 */
2799 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2800 		val = intel_de_read(dev_priv,
2801 				    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
2802 		val &= EDP_PSR_PERF_CNT_MASK;
2803 		seq_printf(m, "Performance counter: %u\n", val);
2804 	}
2805 
2806 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
2807 		seq_printf(m, "Last attempted entry at: %lld\n",
2808 			   psr->last_entry_attempt);
2809 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2810 	}
2811 
2812 	if (psr->psr2_enabled) {
2813 		u32 su_frames_val[3];
2814 		int frame;
2815 
2816 		/*
2817 		 * Read all 3 registers beforehand to minimize the chance of crossing a
2818 		 * frame boundary between register reads.
2819 		 */
2820 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
2821 			val = intel_de_read(dev_priv,
2822 					    PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
2823 			su_frames_val[frame / 3] = val;
2824 		}
2825 
2826 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2827 
2828 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2829 			u32 su_blocks;
2830 
2831 			su_blocks = su_frames_val[frame / 3] &
2832 				    PSR2_SU_STATUS_MASK(frame);
2833 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2834 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
2835 		}
2836 
2837 		seq_printf(m, "PSR2 selective fetch: %s\n",
2838 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
2839 	}
2840 
2841 unlock:
2842 	mutex_unlock(&psr->lock);
2843 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2844 
2845 	return 0;
2846 }
2847 
2848 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
2849 {
2850 	struct drm_i915_private *dev_priv = m->private;
2851 	struct intel_dp *intel_dp = NULL;
2852 	struct intel_encoder *encoder;
2853 
2854 	if (!HAS_PSR(dev_priv))
2855 		return -ENODEV;
2856 
2857 	/* Find the first EDP which supports PSR */
2858 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2859 		intel_dp = enc_to_intel_dp(encoder);
2860 		break;
2861 	}
2862 
2863 	if (!intel_dp)
2864 		return -ENODEV;
2865 
2866 	return intel_psr_status(m, intel_dp);
2867 }
2868 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
2869 
2870 static int
2871 i915_edp_psr_debug_set(void *data, u64 val)
2872 {
2873 	struct drm_i915_private *dev_priv = data;
2874 	struct intel_encoder *encoder;
2875 	intel_wakeref_t wakeref;
2876 	int ret = -ENODEV;
2877 
2878 	if (!HAS_PSR(dev_priv))
2879 		return ret;
2880 
2881 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2882 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2883 
2884 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
2885 
2886 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2887 
2888 		// TODO: split to each transcoder's PSR debug state
2889 		ret = intel_psr_debug_set(intel_dp, val);
2890 
2891 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2892 	}
2893 
2894 	return ret;
2895 }
2896 
2897 static int
2898 i915_edp_psr_debug_get(void *data, u64 *val)
2899 {
2900 	struct drm_i915_private *dev_priv = data;
2901 	struct intel_encoder *encoder;
2902 
2903 	if (!HAS_PSR(dev_priv))
2904 		return -ENODEV;
2905 
2906 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2907 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2908 
2909 		// TODO: split to each transcoder's PSR debug state
2910 		*val = READ_ONCE(intel_dp->psr.debug);
2911 		return 0;
2912 	}
2913 
2914 	return -ENODEV;
2915 }
2916 
2917 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2918 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2919 			"%llu\n");
2920 
2921 void intel_psr_debugfs_register(struct drm_i915_private *i915)
2922 {
2923 	struct drm_minor *minor = i915->drm.primary;
2924 
2925 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
2926 			    i915, &i915_edp_psr_debug_fops);
2927 
2928 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
2929 			    i915, &i915_edp_psr_status_fops);
2930 }
2931 
2932 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2933 {
2934 	struct intel_connector *connector = m->private;
2935 	struct intel_dp *intel_dp = intel_attached_dp(connector);
2936 	static const char * const sink_status[] = {
2937 		"inactive",
2938 		"transition to active, capture and display",
2939 		"active, display from RFB",
2940 		"active, capture and display on sink device timings",
2941 		"transition to inactive, capture and display, timing re-sync",
2942 		"reserved",
2943 		"reserved",
2944 		"sink internal error",
2945 	};
2946 	const char *str;
2947 	int ret;
2948 	u8 val;
2949 
2950 	if (!CAN_PSR(intel_dp)) {
2951 		seq_puts(m, "PSR Unsupported\n");
2952 		return -ENODEV;
2953 	}
2954 
2955 	if (connector->base.status != connector_status_connected)
2956 		return -ENODEV;
2957 
2958 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2959 	if (ret != 1)
2960 		return ret < 0 ? ret : -EIO;
2961 
2962 	val &= DP_PSR_SINK_STATE_MASK;
2963 	if (val < ARRAY_SIZE(sink_status))
2964 		str = sink_status[val];
2965 	else
2966 		str = "unknown";
2967 
2968 	seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2969 
2970 	return 0;
2971 }
2972 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2973 
2974 static int i915_psr_status_show(struct seq_file *m, void *data)
2975 {
2976 	struct intel_connector *connector = m->private;
2977 	struct intel_dp *intel_dp = intel_attached_dp(connector);
2978 
2979 	return intel_psr_status(m, intel_dp);
2980 }
2981 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
2982 
2983 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
2984 {
2985 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2986 	struct dentry *root = connector->base.debugfs_entry;
2987 
2988 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
2989 		return;
2990 
2991 	debugfs_create_file("i915_psr_sink_status", 0444, root,
2992 			    connector, &i915_psr_sink_status_fops);
2993 
2994 	if (HAS_PSR(i915))
2995 		debugfs_create_file("i915_psr_status", 0444, root,
2996 				    connector, &i915_psr_status_fops);
2997 }
2998