1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_atomic_helper.h>
25 
26 #include "display/intel_dp.h"
27 
28 #include "i915_drv.h"
29 #include "intel_display_types.h"
30 #include "intel_psr.h"
31 #include "intel_sprite.h"
32 
33 /**
34  * DOC: Panel Self Refresh (PSR/SRD)
35  *
 * Since Haswell the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented
 * according to the PSR spec in eDP 1.3. PSR allows the display to go to
 * lower standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as
 * the frame buffer for that display is unchanged.
42  *
43  * Panel Self Refresh must be supported by both Hardware (source) and
44  * Panel (sink).
45  *
46  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
47  * to power down the link and memory controller. For DSI panels the same idea
48  * is called "manual mode".
49  *
50  * The implementation uses the hardware-based PSR support which automatically
51  * enters/exits self-refresh mode. The hardware takes care of sending the
52  * required DP aux message and could even retrain the link (that part isn't
53  * enabled yet though). The hardware also keeps track of any frontbuffer
54  * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses the
56  * software frontbuffer tracking to make sure it doesn't miss a screen
57  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
58  * get called by the frontbuffer tracking code. Note that because of locking
59  * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
61  */
62 
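/* A PSR mode forced via debugfs overrides the enable_psr modparam. */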
63 static bool psr_global_enabled(u32 debug)
64 {
65 	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
66 	case I915_PSR_DEBUG_DEFAULT:
67 		return i915_modparams.enable_psr;
68 	case I915_PSR_DEBUG_DISABLE:
69 		return false;
70 	default:
71 		return true;
72 	}
73 }
74 
75 static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
76 			       const struct intel_crtc_state *crtc_state)
77 {
78 	/* Cannot enable DSC and PSR2 simultaneously */
79 	WARN_ON(crtc_state->dsc_params.compression_enable &&
80 		crtc_state->has_psr2);
81 
82 	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
83 	case I915_PSR_DEBUG_DISABLE:
84 	case I915_PSR_DEBUG_FORCE_PSR1:
85 		return false;
86 	default:
87 		return crtc_state->has_psr2;
88 	}
89 }
90 
91 static void psr_irq_control(struct drm_i915_private *dev_priv)
92 {
93 	enum transcoder trans_shift;
94 	u32 mask, val;
95 	i915_reg_t imr_reg;
96 
97 	/*
	 * gen12+ has one set of PSR registers per transcoder, all using the
	 * same bit definitions: handle it as TRANSCODER_EDP to force a 0
	 * shift in the bit definitions.
101 	 */
102 	if (INTEL_GEN(dev_priv) >= 12) {
103 		trans_shift = 0;
104 		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
105 	} else {
106 		trans_shift = dev_priv->psr.transcoder;
107 		imr_reg = EDP_PSR_IMR;
108 	}
109 
110 	mask = EDP_PSR_ERROR(trans_shift);
111 	if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
112 		mask |= EDP_PSR_POST_EXIT(trans_shift) |
113 			EDP_PSR_PRE_ENTRY(trans_shift);
114 
115 	/* Warning: it is masking/setting reserved bits too */
116 	val = I915_READ(imr_reg);
117 	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
118 	val |= ~mask;
119 	I915_WRITE(imr_reg, val);
120 }
121 
122 static void psr_event_print(u32 val, bool psr2_enabled)
123 {
124 	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
125 	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
126 		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
127 	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
128 		DRM_DEBUG_KMS("\tPSR2 disabled\n");
129 	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
130 		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
131 	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
132 		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
133 	if (val & PSR_EVENT_GRAPHICS_RESET)
134 		DRM_DEBUG_KMS("\tGraphics reset\n");
135 	if (val & PSR_EVENT_PCH_INTERRUPT)
136 		DRM_DEBUG_KMS("\tPCH interrupt\n");
137 	if (val & PSR_EVENT_MEMORY_UP)
138 		DRM_DEBUG_KMS("\tMemory up\n");
139 	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
140 		DRM_DEBUG_KMS("\tFront buffer modification\n");
141 	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
142 		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
143 	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
144 		DRM_DEBUG_KMS("\tPIPE registers updated\n");
145 	if (val & PSR_EVENT_REGISTER_UPDATE)
146 		DRM_DEBUG_KMS("\tRegister updated\n");
147 	if (val & PSR_EVENT_HDCP_ENABLE)
148 		DRM_DEBUG_KMS("\tHDCP enabled\n");
149 	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
150 		DRM_DEBUG_KMS("\tKVMR session enabled\n");
151 	if (val & PSR_EVENT_VBI_ENABLE)
152 		DRM_DEBUG_KMS("\tVBI enabled\n");
153 	if (val & PSR_EVENT_LPSP_MODE_EXIT)
154 		DRM_DEBUG_KMS("\tLPSP mode exited\n");
155 	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
156 		DRM_DEBUG_KMS("\tPSR disabled\n");
157 }
158 
159 void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
160 {
161 	enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
162 	enum transcoder trans_shift;
163 	i915_reg_t imr_reg;
164 	ktime_t time_ns =  ktime_get();
165 
166 	if (INTEL_GEN(dev_priv) >= 12) {
167 		trans_shift = 0;
168 		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
169 	} else {
170 		trans_shift = dev_priv->psr.transcoder;
171 		imr_reg = EDP_PSR_IMR;
172 	}
173 
174 	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
175 		dev_priv->psr.last_entry_attempt = time_ns;
176 		DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
177 			      transcoder_name(cpu_transcoder));
178 	}
179 
180 	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
181 		dev_priv->psr.last_exit = time_ns;
182 		DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
183 			      transcoder_name(cpu_transcoder));
184 
185 		if (INTEL_GEN(dev_priv) >= 9) {
186 			u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
187 			bool psr2_enabled = dev_priv->psr.psr2_enabled;
188 
189 			I915_WRITE(PSR_EVENT(cpu_transcoder), val);
190 			psr_event_print(val, psr2_enabled);
191 		}
192 	}
193 
194 	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
195 		u32 val;
196 
197 		DRM_WARN("[transcoder %s] PSR aux error\n",
198 			 transcoder_name(cpu_transcoder));
199 
200 		dev_priv->psr.irq_aux_error = true;
201 
202 		/*
		 * If this interrupt is not masked it will keep firing so fast
		 * that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again, so
		 * we don't care about unmasking the interrupt or clearing
		 * irq_aux_error.
209 		 */
210 		val = I915_READ(imr_reg);
211 		val |= EDP_PSR_ERROR(trans_shift);
212 		I915_WRITE(imr_reg, val);
213 
214 		schedule_work(&dev_priv->psr.work);
215 	}
216 }
217 
218 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
219 {
220 	u8 alpm_caps = 0;
221 
222 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
223 			      &alpm_caps) != 1)
224 		return false;
225 	return alpm_caps & DP_ALPM_CAP;
226 }
227 
228 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
229 {
230 	u8 val = 8; /* assume the worst if we can't read the value */
231 
232 	if (drm_dp_dpcd_readb(&intel_dp->aux,
233 			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
234 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
235 	else
236 		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
237 	return val;
238 }
239 
static u16 intel_dp_get_su_x_granularity(struct intel_dp *intel_dp)
241 {
242 	u16 val;
243 	ssize_t r;
244 
245 	/*
	 * Return the default X granularity if the sink does not require a
	 * specific granularity or if the DPCD read fails.
248 	 */
249 	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
250 		return 4;
251 
252 	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
253 	if (r != 2)
254 		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
255 
256 	/*
257 	 * Spec says that if the value read is 0 the default granularity should
258 	 * be used instead.
259 	 */
260 	if (r != 2 || val == 0)
261 		val = 4;
262 
263 	return val;
264 }
265 
266 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
267 {
268 	struct drm_i915_private *dev_priv =
269 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
270 
271 	if (dev_priv->psr.dp) {
272 		DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
273 		return;
274 	}
275 
276 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
277 			 sizeof(intel_dp->psr_dpcd));
278 
279 	if (!intel_dp->psr_dpcd[0])
280 		return;
281 	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
282 		      intel_dp->psr_dpcd[0]);
283 
284 	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
285 		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
286 		return;
287 	}
288 
289 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
290 		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
291 		return;
292 	}
293 
294 	dev_priv->psr.sink_support = true;
295 	dev_priv->psr.sink_sync_latency =
296 		intel_dp_get_sink_sync_latency(intel_dp);
297 
298 	dev_priv->psr.dp = intel_dp;
299 
300 	if (INTEL_GEN(dev_priv) >= 9 &&
301 	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
302 		bool y_req = intel_dp->psr_dpcd[1] &
303 			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
304 		bool alpm = intel_dp_get_alpm_status(intel_dp);
305 
306 		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
		 * are only sure that it is going to be used when required by
		 * the panel. This way the panel is capable of doing selective
		 * updates without an AUX frame sync.
		 *
		 * To support panels with PSR version 02h, or PSR version 03h
		 * without the Y-coordinate requirement, we would need to
		 * enable GTC first.
316 		 */
317 		dev_priv->psr.sink_psr2_support = y_req && alpm;
318 		DRM_DEBUG_KMS("PSR2 %ssupported\n",
319 			      dev_priv->psr.sink_psr2_support ? "" : "not ");
320 
321 		if (dev_priv->psr.sink_psr2_support) {
322 			dev_priv->psr.colorimetry_support =
323 				intel_dp_get_colorimetry_status(intel_dp);
324 			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granularity(intel_dp);
326 		}
327 	}
328 }
329 
330 static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
331 				const struct intel_crtc_state *crtc_state)
332 {
333 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
334 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
335 	struct dp_sdp psr_vsc;
336 
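	/*
	 * In the VSC SDP header, HB2 carries the revision number and HB3 the
	 * number of valid data bytes.
	 */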
337 	if (dev_priv->psr.psr2_enabled) {
338 		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
339 		memset(&psr_vsc, 0, sizeof(psr_vsc));
340 		psr_vsc.sdp_header.HB0 = 0;
341 		psr_vsc.sdp_header.HB1 = 0x7;
342 		if (dev_priv->psr.colorimetry_support) {
343 			psr_vsc.sdp_header.HB2 = 0x5;
344 			psr_vsc.sdp_header.HB3 = 0x13;
345 		} else {
346 			psr_vsc.sdp_header.HB2 = 0x4;
347 			psr_vsc.sdp_header.HB3 = 0xe;
348 		}
349 	} else {
350 		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
351 		memset(&psr_vsc, 0, sizeof(psr_vsc));
352 		psr_vsc.sdp_header.HB0 = 0;
353 		psr_vsc.sdp_header.HB1 = 0x7;
354 		psr_vsc.sdp_header.HB2 = 0x2;
355 		psr_vsc.sdp_header.HB3 = 0x8;
356 	}
357 
358 	intel_dig_port->write_infoframe(&intel_dig_port->base,
359 					crtc_state,
360 					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
361 }
362 
363 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
364 {
365 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
366 	u32 aux_clock_divider, aux_ctl;
367 	int i;
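	/*
	 * Canned native AUX write of DP_SET_POWER = D0 for the PSR HW to send
	 * when waking the sink; byte 3 of a native AUX write header is the
	 * payload length minus one (a single data byte here).
	 */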
368 	static const u8 aux_msg[] = {
369 		[0] = DP_AUX_NATIVE_WRITE << 4,
370 		[1] = DP_SET_POWER >> 8,
371 		[2] = DP_SET_POWER & 0xff,
372 		[3] = 1 - 1,
373 		[4] = DP_SET_POWER_D0,
374 	};
375 	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
376 			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
377 			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
378 			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
379 
380 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
381 	for (i = 0; i < sizeof(aux_msg); i += 4)
382 		I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
383 			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
384 
385 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
386 
387 	/* Start with bits set for DDI_AUX_CTL register */
388 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
389 					     aux_clock_divider);
390 
391 	/* Select only valid bits for SRD_AUX_CTL */
392 	aux_ctl &= psr_aux_mask;
393 	I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
394 }
395 
396 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
397 {
398 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
399 	u8 dpcd_val = DP_PSR_ENABLE;
400 
401 	/* Enable ALPM at sink for psr2 */
402 	if (dev_priv->psr.psr2_enabled) {
403 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
404 				   DP_ALPM_ENABLE);
405 		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
406 	} else {
407 		if (dev_priv->psr.link_standby)
408 			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
409 
410 		if (INTEL_GEN(dev_priv) >= 8)
411 			dpcd_val |= DP_PSR_CRC_VERIFICATION;
412 	}
413 
414 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
415 
416 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
417 }
418 
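/*
 * Build the training pattern (TP) timing fields of EDP_PSR_CTL: the TP1 and
 * TP2/TP3 wake-up times come from the VBT, and TP3 is selected only when the
 * source supports HBR2 and the sink supports TPS3.
 */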
419 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
420 {
421 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
422 	u32 val = 0;
423 
424 	if (INTEL_GEN(dev_priv) >= 11)
425 		val |= EDP_PSR_TP4_TIME_0US;
426 
427 	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
428 		val |= EDP_PSR_TP1_TIME_0us;
429 	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
430 		val |= EDP_PSR_TP1_TIME_100us;
431 	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
432 		val |= EDP_PSR_TP1_TIME_500us;
433 	else
434 		val |= EDP_PSR_TP1_TIME_2500us;
435 
436 	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
437 		val |= EDP_PSR_TP2_TP3_TIME_0us;
438 	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
439 		val |= EDP_PSR_TP2_TP3_TIME_100us;
440 	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
441 		val |= EDP_PSR_TP2_TP3_TIME_500us;
442 	else
443 		val |= EDP_PSR_TP2_TP3_TIME_2500us;
444 
445 	if (intel_dp_source_supports_hbr2(intel_dp) &&
446 	    drm_dp_tps3_supported(intel_dp->dpcd))
447 		val |= EDP_PSR_TP1_TP3_SEL;
448 	else
449 		val |= EDP_PSR_TP1_TP2_SEL;
450 
451 	return val;
452 }
453 
454 static void hsw_activate_psr1(struct intel_dp *intel_dp)
455 {
456 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
457 	u32 max_sleep_time = 0x1f;
458 	u32 val = EDP_PSR_ENABLE;
459 
460 	/* Let's use 6 as the minimum to cover all known cases including the
461 	 * off-by-one issue that HW has in some cases.
462 	 */
463 	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
464 
	/* A sink_sync_latency of 8 means the source has to wait for more than
	 * 8 frames; we'll go with 9 frames for now.
467 	 */
468 	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
469 	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
470 
471 	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
472 	if (IS_HASWELL(dev_priv))
473 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
474 
475 	if (dev_priv->psr.link_standby)
476 		val |= EDP_PSR_LINK_STANDBY;
477 
478 	val |= intel_psr1_get_tp_time(intel_dp);
479 
480 	if (INTEL_GEN(dev_priv) >= 8)
481 		val |= EDP_PSR_CRC_ENABLE;
482 
483 	val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
484 		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
485 	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
486 }
487 
488 static void hsw_activate_psr2(struct intel_dp *intel_dp)
489 {
490 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
491 	u32 val;
492 
493 	/* Let's use 6 as the minimum to cover all known cases including the
494 	 * off-by-one issue that HW has in some cases.
495 	 */
496 	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
497 
498 	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
499 	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
500 
501 	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
502 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
503 		val |= EDP_Y_COORDINATE_ENABLE;
504 
505 	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
506 
507 	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
508 	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
509 		val |= EDP_PSR2_TP2_TIME_50us;
510 	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
511 		val |= EDP_PSR2_TP2_TIME_100us;
512 	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
513 		val |= EDP_PSR2_TP2_TIME_500us;
514 	else
515 		val |= EDP_PSR2_TP2_TIME_2500us;
516 
517 	/*
	 * PSR2 HW incorrectly uses EDP_PSR_TP1_TP3_SEL, and BSpec recommends
	 * keeping this bit unset while PSR2 is enabled.
520 	 */
521 	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
522 
523 	I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
524 }
525 
526 static bool
527 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
528 {
529 	if (INTEL_GEN(dev_priv) < 9)
530 		return false;
531 	else if (INTEL_GEN(dev_priv) >= 12)
532 		return trans == TRANSCODER_A;
533 	else
534 		return trans == TRANSCODER_EDP;
535 }
536 
537 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
538 				    struct intel_crtc_state *crtc_state)
539 {
540 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
541 	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
542 	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
543 	int psr_max_h = 0, psr_max_v = 0;
544 
545 	if (!dev_priv->psr.sink_psr2_support)
546 		return false;
547 
548 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
549 		DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
550 			      transcoder_name(crtc_state->cpu_transcoder));
551 		return false;
552 	}
553 
554 	/*
555 	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
556 	 * resolution requires DSC to be enabled, priority is given to DSC
557 	 * over PSR2.
558 	 */
559 	if (crtc_state->dsc_params.compression_enable) {
560 		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
561 		return false;
562 	}
563 
564 	if (INTEL_GEN(dev_priv) >= 12) {
565 		psr_max_h = 5120;
566 		psr_max_v = 3200;
567 	} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
568 		psr_max_h = 4096;
569 		psr_max_v = 2304;
570 	} else if (IS_GEN(dev_priv, 9)) {
571 		psr_max_h = 3640;
572 		psr_max_v = 2304;
573 	}
574 
575 	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
576 		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
577 			      crtc_hdisplay, crtc_vdisplay,
578 			      psr_max_h, psr_max_v);
579 		return false;
580 	}
581 
582 	/*
583 	 * HW sends SU blocks of size four scan lines, which means the starting
584 	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate that the SU block width is a multiple of the
	 * X granularity.
587 	 */
588 	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
589 		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
590 			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
591 		return false;
592 	}
593 
594 	if (crtc_state->crc_enabled) {
595 		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
596 		return false;
597 	}
598 
599 	return true;
600 }
601 
602 void intel_psr_compute_config(struct intel_dp *intel_dp,
603 			      struct intel_crtc_state *crtc_state)
604 {
605 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
606 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
607 	const struct drm_display_mode *adjusted_mode =
608 		&crtc_state->base.adjusted_mode;
609 	int psr_setup_time;
610 
611 	if (!CAN_PSR(dev_priv))
612 		return;
613 
614 	if (intel_dp != dev_priv->psr.dp)
615 		return;
616 
617 	/*
618 	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder,
	 * but for now the driver only supports one instance of PSR, so let's
	 * keep it hardcoded to PORT_A.
622 	 */
623 	if (dig_port->base.port != PORT_A) {
624 		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
625 		return;
626 	}
627 
628 	if (dev_priv->psr.sink_not_reliable) {
629 		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
630 		return;
631 	}
632 
633 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
634 		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
635 		return;
636 	}
637 
638 	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
639 	if (psr_setup_time < 0) {
640 		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
641 			      intel_dp->psr_dpcd[1]);
642 		return;
643 	}
644 
645 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
646 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
647 		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
648 			      psr_setup_time);
649 		return;
650 	}
651 
652 	crtc_state->has_psr = true;
653 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
654 }
655 
656 static void intel_psr_activate(struct intel_dp *intel_dp)
657 {
658 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
659 
660 	if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
661 		WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
662 
663 	WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
664 	WARN_ON(dev_priv->psr.active);
665 	lockdep_assert_held(&dev_priv->psr.lock);
666 
	/* PSR1 and PSR2 are mutually exclusive. */
668 	if (dev_priv->psr.psr2_enabled)
669 		hsw_activate_psr2(intel_dp);
670 	else
671 		hsw_activate_psr1(intel_dp);
672 
673 	dev_priv->psr.active = true;
674 }
675 
676 static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
677 					 enum transcoder cpu_transcoder)
678 {
679 	static const i915_reg_t regs[] = {
680 		[TRANSCODER_A] = CHICKEN_TRANS_A,
681 		[TRANSCODER_B] = CHICKEN_TRANS_B,
682 		[TRANSCODER_C] = CHICKEN_TRANS_C,
683 		[TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
684 	};
685 
686 	WARN_ON(INTEL_GEN(dev_priv) < 9);
687 
688 	if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
689 		    !regs[cpu_transcoder].reg))
690 		cpu_transcoder = TRANSCODER_A;
691 
692 	return regs[cpu_transcoder];
693 }
694 
695 static void intel_psr_enable_source(struct intel_dp *intel_dp,
696 				    const struct intel_crtc_state *crtc_state)
697 {
698 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
699 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
700 	u32 mask;
701 
	/* Only HSW and BDW have PSR AUX registers that need to be set up.
	 * SKL+ use hardcoded values for PSR AUX transactions.
704 	 */
705 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
706 		hsw_psr_setup_aux(intel_dp);
707 
708 	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
709 					   !IS_GEMINILAKE(dev_priv))) {
710 		i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
711 							cpu_transcoder);
712 		u32 chicken = I915_READ(reg);
713 
714 		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
715 			   PSR2_ADD_VERTICAL_LINE_COUNT;
716 		I915_WRITE(reg, chicken);
717 	}
718 
719 	/*
	 * Per Spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other HW tracking issues, now that
	 * we can rely on frontbuffer tracking.
724 	 */
725 	mask = EDP_PSR_DEBUG_MASK_MEMUP |
726 	       EDP_PSR_DEBUG_MASK_HPD |
727 	       EDP_PSR_DEBUG_MASK_LPSP |
728 	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
729 
730 	if (INTEL_GEN(dev_priv) < 11)
731 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
732 
733 	I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);
734 
735 	psr_irq_control(dev_priv);
736 }
737 
738 static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
739 				    const struct intel_crtc_state *crtc_state)
740 {
741 	struct intel_dp *intel_dp = dev_priv->psr.dp;
742 	u32 val;
743 
744 	WARN_ON(dev_priv->psr.enabled);
745 
746 	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
747 	dev_priv->psr.busy_frontbuffer_bits = 0;
748 	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
749 	dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
750 
751 	/*
752 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
753 	 * will still keep the error set even after the reset done in the
754 	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the
	 * first time the PSR HW tries to activate, so let's keep PSR
	 * disabled to avoid any rendering problems.
758 	 */
759 	if (INTEL_GEN(dev_priv) >= 12) {
760 		val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
761 		val &= EDP_PSR_ERROR(0);
762 	} else {
763 		val = I915_READ(EDP_PSR_IIR);
764 		val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
765 	}
766 	if (val) {
767 		dev_priv->psr.sink_not_reliable = true;
768 		DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
769 		return;
770 	}
771 
772 	DRM_DEBUG_KMS("Enabling PSR%s\n",
773 		      dev_priv->psr.psr2_enabled ? "2" : "1");
774 	intel_psr_setup_vsc(intel_dp, crtc_state);
775 	intel_psr_enable_sink(intel_dp);
776 	intel_psr_enable_source(intel_dp, crtc_state);
777 	dev_priv->psr.enabled = true;
778 
779 	intel_psr_activate(intel_dp);
780 }
781 
782 /**
783  * intel_psr_enable - Enable PSR
784  * @intel_dp: Intel DP
785  * @crtc_state: new CRTC state
786  *
787  * This function can only be called after the pipe is fully trained and enabled.
788  */
789 void intel_psr_enable(struct intel_dp *intel_dp,
790 		      const struct intel_crtc_state *crtc_state)
791 {
792 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
793 
794 	if (!crtc_state->has_psr)
795 		return;
796 
797 	if (WARN_ON(!CAN_PSR(dev_priv)))
798 		return;
799 
800 	WARN_ON(dev_priv->drrs.dp);
801 
802 	mutex_lock(&dev_priv->psr.lock);
803 
804 	if (!psr_global_enabled(dev_priv->psr.debug)) {
805 		DRM_DEBUG_KMS("PSR disabled by flag\n");
806 		goto unlock;
807 	}
808 
809 	intel_psr_enable_locked(dev_priv, crtc_state);
810 
811 unlock:
812 	mutex_unlock(&dev_priv->psr.lock);
813 }
814 
815 static void intel_psr_exit(struct drm_i915_private *dev_priv)
816 {
817 	u32 val;
818 
819 	if (!dev_priv->psr.active) {
820 		if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
821 			val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
822 			WARN_ON(val & EDP_PSR2_ENABLE);
823 		}
824 
825 		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
826 		WARN_ON(val & EDP_PSR_ENABLE);
827 
828 		return;
829 	}
830 
831 	if (dev_priv->psr.psr2_enabled) {
832 		val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
833 		WARN_ON(!(val & EDP_PSR2_ENABLE));
834 		val &= ~EDP_PSR2_ENABLE;
835 		I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
836 	} else {
837 		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
838 		WARN_ON(!(val & EDP_PSR_ENABLE));
839 		val &= ~EDP_PSR_ENABLE;
840 		I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
841 	}
842 	dev_priv->psr.active = false;
843 }
844 
845 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
846 {
847 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
848 	i915_reg_t psr_status;
849 	u32 psr_status_mask;
850 
851 	lockdep_assert_held(&dev_priv->psr.lock);
852 
853 	if (!dev_priv->psr.enabled)
854 		return;
855 
856 	DRM_DEBUG_KMS("Disabling PSR%s\n",
857 		      dev_priv->psr.psr2_enabled ? "2" : "1");
858 
859 	intel_psr_exit(dev_priv);
860 
861 	if (dev_priv->psr.psr2_enabled) {
862 		psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
863 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
864 	} else {
865 		psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
866 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
867 	}
868 
869 	/* Wait till PSR is idle */
870 	if (intel_de_wait_for_clear(dev_priv, psr_status,
871 				    psr_status_mask, 2000))
872 		DRM_ERROR("Timed out waiting PSR idle state\n");
873 
874 	/* Disable PSR on Sink */
875 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
876 
877 	dev_priv->psr.enabled = false;
878 }
879 
880 /**
881  * intel_psr_disable - Disable PSR
882  * @intel_dp: Intel DP
883  * @old_crtc_state: old CRTC state
884  *
885  * This function needs to be called before disabling pipe.
886  */
887 void intel_psr_disable(struct intel_dp *intel_dp,
888 		       const struct intel_crtc_state *old_crtc_state)
889 {
890 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
891 
892 	if (!old_crtc_state->has_psr)
893 		return;
894 
895 	if (WARN_ON(!CAN_PSR(dev_priv)))
896 		return;
897 
898 	mutex_lock(&dev_priv->psr.lock);
899 
900 	intel_psr_disable_locked(intel_dp);
901 
902 	mutex_unlock(&dev_priv->psr.lock);
903 	cancel_work_sync(&dev_priv->psr.work);
904 }
905 
906 static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
907 {
908 	if (INTEL_GEN(dev_priv) >= 9)
909 		/*
910 		 * Display WA #0884: skl+
911 		 * This documented WA for bxt can be safely applied
912 		 * broadly so we can force HW tracking to exit PSR
913 		 * instead of disabling and re-enabling.
914 		 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the currently active
		 * pipe.
917 		 */
918 		I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
919 	else
920 		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit
		 * PSR on older gens, so do the manual exit instead.
923 		 */
924 		intel_psr_exit(dev_priv);
925 }
926 
927 /**
928  * intel_psr_update - Update PSR state
929  * @intel_dp: Intel DP
930  * @crtc_state: new CRTC state
931  *
 * This function updates the PSR state, disabling, enabling or switching PSR
 * versions when executing fastsets. For full modesets, intel_psr_disable() and
934  * intel_psr_enable() should be called instead.
935  */
936 void intel_psr_update(struct intel_dp *intel_dp,
937 		      const struct intel_crtc_state *crtc_state)
938 {
939 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
940 	struct i915_psr *psr = &dev_priv->psr;
941 	bool enable, psr2_enable;
942 
943 	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
944 		return;
945 
946 	mutex_lock(&dev_priv->psr.lock);
947 
948 	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
949 	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
950 
951 	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
952 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
953 		if (crtc_state->crc_enabled && psr->enabled)
954 			psr_force_hw_tracking_exit(dev_priv);
955 		else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
956 			/*
957 			 * Activate PSR again after a force exit when enabling
958 			 * CRC in older gens
959 			 */
960 			if (!dev_priv->psr.active &&
961 			    !dev_priv->psr.busy_frontbuffer_bits)
962 				schedule_work(&dev_priv->psr.work);
963 		}
964 
965 		goto unlock;
966 	}
967 
968 	if (psr->enabled)
969 		intel_psr_disable_locked(intel_dp);
970 
971 	if (enable)
972 		intel_psr_enable_locked(dev_priv, crtc_state);
973 
974 unlock:
975 	mutex_unlock(&dev_priv->psr.lock);
976 }
977 
978 /**
979  * intel_psr_wait_for_idle - wait for PSR1 to idle
980  * @new_crtc_state: new CRTC state
981  * @out_value: PSR status in case of failure
982  *
983  * This function is expected to be called from pipe_update_start() where it is
984  * not expected to race with PSR enable or disable.
985  *
 * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
987  */
988 int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
989 			    u32 *out_value)
990 {
991 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
992 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
993 
994 	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
995 		return 0;
996 
997 	/* FIXME: Update this for PSR2 if we need to wait for idle */
998 	if (READ_ONCE(dev_priv->psr.psr2_enabled))
999 		return 0;
1000 
1001 	/*
1002 	 * From bspec: Panel Self Refresh (BDW+)
1003 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
1004 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
1005 	 * defensive enough to cover everything.
1006 	 */
1007 
1008 	return __intel_wait_for_register(&dev_priv->uncore,
1009 					 EDP_PSR_STATUS(dev_priv->psr.transcoder),
1010 					 EDP_PSR_STATUS_STATE_MASK,
1011 					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
1012 					 out_value);
1013 }
1014 
1015 static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
1016 {
1017 	i915_reg_t reg;
1018 	u32 mask;
1019 	int err;
1020 
1021 	if (!dev_priv->psr.enabled)
1022 		return false;
1023 
1024 	if (dev_priv->psr.psr2_enabled) {
1025 		reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
1026 		mask = EDP_PSR2_STATUS_STATE_MASK;
1027 	} else {
1028 		reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
1029 		mask = EDP_PSR_STATUS_STATE_MASK;
1030 	}
1031 
1032 	mutex_unlock(&dev_priv->psr.lock);
1033 
1034 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
1035 	if (err)
1036 		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
1037 
1038 	/* After the unlocked wait, verify that PSR is still wanted! */
1039 	mutex_lock(&dev_priv->psr.lock);
1040 	return err == 0 && dev_priv->psr.enabled;
1041 }
1042 
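/*
 * Force an atomic commit on the PSR-capable CRTC so that a PSR debug mode
 * change (see intel_psr_debug_set()) takes effect immediately.
 */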
1043 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
1044 {
1045 	struct drm_device *dev = &dev_priv->drm;
1046 	struct drm_modeset_acquire_ctx ctx;
1047 	struct drm_atomic_state *state;
1048 	struct drm_crtc *crtc;
1049 	int err;
1050 
1051 	state = drm_atomic_state_alloc(dev);
1052 	if (!state)
1053 		return -ENOMEM;
1054 
1055 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1056 	state->acquire_ctx = &ctx;
1057 
1058 retry:
1059 	drm_for_each_crtc(crtc, dev) {
1060 		struct drm_crtc_state *crtc_state;
1061 		struct intel_crtc_state *intel_crtc_state;
1062 
1063 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
1064 		if (IS_ERR(crtc_state)) {
1065 			err = PTR_ERR(crtc_state);
1066 			goto error;
1067 		}
1068 
1069 		intel_crtc_state = to_intel_crtc_state(crtc_state);
1070 
1071 		if (crtc_state->active && intel_crtc_state->has_psr) {
1072 			/* Mark mode as changed to trigger a pipe->update() */
1073 			crtc_state->mode_changed = true;
1074 			break;
1075 		}
1076 	}
1077 
1078 	err = drm_atomic_commit(state);
1079 
1080 error:
1081 	if (err == -EDEADLK) {
1082 		drm_atomic_state_clear(state);
1083 		err = drm_modeset_backoff(&ctx);
1084 		if (!err)
1085 			goto retry;
1086 	}
1087 
1088 	drm_modeset_drop_locks(&ctx);
1089 	drm_modeset_acquire_fini(&ctx);
1090 	drm_atomic_state_put(state);
1091 
1092 	return err;
1093 }
1094 
1095 int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
1096 {
1097 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
1098 	u32 old_mode;
1099 	int ret;
1100 
1101 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
1102 	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
1103 		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
1104 		return -EINVAL;
1105 	}
1106 
1107 	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
1108 	if (ret)
1109 		return ret;
1110 
1111 	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
1112 	dev_priv->psr.debug = val;
1113 
1114 	/*
1115 	 * Do it right away if it's already enabled, otherwise it will be done
1116 	 * when enabling the source.
1117 	 */
1118 	if (dev_priv->psr.enabled)
1119 		psr_irq_control(dev_priv);
1120 
1121 	mutex_unlock(&dev_priv->psr.lock);
1122 
1123 	if (old_mode != mode)
1124 		ret = intel_psr_fastset_force(dev_priv);
1125 
1126 	return ret;
1127 }
1128 
1129 static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
1130 {
1131 	struct i915_psr *psr = &dev_priv->psr;
1132 
1133 	intel_psr_disable_locked(psr->dp);
1134 	psr->sink_not_reliable = true;
	/* let's make sure that the sink is awake */
1136 	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
1137 }
1138 
1139 static void intel_psr_work(struct work_struct *work)
1140 {
1141 	struct drm_i915_private *dev_priv =
1142 		container_of(work, typeof(*dev_priv), psr.work);
1143 
1144 	mutex_lock(&dev_priv->psr.lock);
1145 
1146 	if (!dev_priv->psr.enabled)
1147 		goto unlock;
1148 
1149 	if (READ_ONCE(dev_priv->psr.irq_aux_error))
1150 		intel_psr_handle_irq(dev_priv);
1151 
1152 	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
1157 	 */
1158 	if (!__psr_wait_for_idle_locked(dev_priv))
1159 		goto unlock;
1160 
1161 	/*
1162 	 * The delayed work can race with an invalidate hence we need to
1163 	 * recheck. Since psr_flush first clears this and then reschedules we
1164 	 * won't ever miss a flush when bailing out here.
1165 	 */
1166 	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
1167 		goto unlock;
1168 
1169 	intel_psr_activate(dev_priv->psr.dp);
1170 unlock:
1171 	mutex_unlock(&dev_priv->psr.lock);
1172 }
1173 
1174 /**
 * intel_psr_invalidate - Invalidate PSR
1176  * @dev_priv: i915 device
1177  * @frontbuffer_bits: frontbuffer plane tracking bits
1178  * @origin: which operation caused the invalidate
1179  *
1180  * Since the hardware frontbuffer tracking has gaps we need to integrate
1181  * with the software frontbuffer tracking. This function gets called every
1182  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
1183  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
1184  *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1186  */
1187 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
1188 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
1189 {
1190 	if (!CAN_PSR(dev_priv))
1191 		return;
1192 
1193 	if (origin == ORIGIN_FLIP)
1194 		return;
1195 
1196 	mutex_lock(&dev_priv->psr.lock);
1197 	if (!dev_priv->psr.enabled) {
1198 		mutex_unlock(&dev_priv->psr.lock);
1199 		return;
1200 	}
1201 
1202 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
1203 	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
1204 
1205 	if (frontbuffer_bits)
1206 		intel_psr_exit(dev_priv);
1207 
1208 	mutex_unlock(&dev_priv->psr.lock);
1209 }
1210 
1211 /**
1212  * intel_psr_flush - Flush PSR
1213  * @dev_priv: i915 device
1214  * @frontbuffer_bits: frontbuffer plane tracking bits
1215  * @origin: which operation caused the flush
1216  *
1217  * Since the hardware frontbuffer tracking has gaps we need to integrate
1218  * with the software frontbuffer tracking. This function gets called every
1219  * time frontbuffer rendering has completed and flushed out to memory. PSR
1220  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
1221  *
1222  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1223  */
1224 void intel_psr_flush(struct drm_i915_private *dev_priv,
1225 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
1226 {
1227 	if (!CAN_PSR(dev_priv))
1228 		return;
1229 
1230 	if (origin == ORIGIN_FLIP)
1231 		return;
1232 
1233 	mutex_lock(&dev_priv->psr.lock);
1234 	if (!dev_priv->psr.enabled) {
1235 		mutex_unlock(&dev_priv->psr.lock);
1236 		return;
1237 	}
1238 
1239 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
1240 	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
1241 
1242 	/* By definition flush = invalidate + flush */
1243 	if (frontbuffer_bits)
1244 		psr_force_hw_tracking_exit(dev_priv);
1245 
1246 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
1247 		schedule_work(&dev_priv->psr.work);
1248 	mutex_unlock(&dev_priv->psr.lock);
1249 }
1250 
1251 /**
1252  * intel_psr_init - Init basic PSR work and mutex.
1253  * @dev_priv: i915 device private
1254  *
 * This function is called only once at driver load to initialize basic
1256  * PSR stuff.
1257  */
1258 void intel_psr_init(struct drm_i915_private *dev_priv)
1259 {
1260 	if (!HAS_PSR(dev_priv))
1261 		return;
1262 
1263 	if (!dev_priv->psr.sink_support)
1264 		return;
1265 
1266 	if (IS_HASWELL(dev_priv))
1267 		/*
		 * HSW doesn't have PSR registers in the same space as the
		 * transcoder, so set this to a value that, when subtracted
		 * from the register offset in transcoder space, results in
		 * the right offset for HSW.
1271 		 */
1272 		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
1273 
1274 	if (i915_modparams.enable_psr == -1)
1275 		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
1276 			i915_modparams.enable_psr = 0;
1277 
	/* Set link_standby vs. link_off defaults */
1279 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1280 		/* HSW and BDW require workarounds that we don't implement. */
1281 		dev_priv->psr.link_standby = false;
1282 	else if (INTEL_GEN(dev_priv) < 12)
		/* For new platforms up to TGL let's respect the VBT again */
1284 		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
1285 
1286 	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
1287 	mutex_init(&dev_priv->psr.lock);
1288 }
1289 
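/*
 * Called on a short HPD pulse from the sink: check the PSR status and error
 * DPCD registers and disable PSR if the sink reports an error.
 */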
1290 void intel_psr_short_pulse(struct intel_dp *intel_dp)
1291 {
1292 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1293 	struct i915_psr *psr = &dev_priv->psr;
1294 	u8 val;
1295 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
1296 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
1297 			  DP_PSR_LINK_CRC_ERROR;
1298 
1299 	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
1300 		return;
1301 
1302 	mutex_lock(&psr->lock);
1303 
1304 	if (!psr->enabled || psr->dp != intel_dp)
1305 		goto exit;
1306 
1307 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
1308 		DRM_ERROR("PSR_STATUS dpcd read failed\n");
1309 		goto exit;
1310 	}
1311 
1312 	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
1313 		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
1314 		intel_psr_disable_locked(intel_dp);
1315 		psr->sink_not_reliable = true;
1316 	}
1317 
1318 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
1319 		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
1320 		goto exit;
1321 	}
1322 
1323 	if (val & DP_PSR_RFB_STORAGE_ERROR)
1324 		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
1325 	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
1326 		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
1327 	if (val & DP_PSR_LINK_CRC_ERROR)
1328 		DRM_ERROR("PSR Link CRC error, disabling PSR\n");
1329 
1330 	if (val & ~errors)
1331 		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
1332 			  val & ~errors);
1333 	if (val & errors) {
1334 		intel_psr_disable_locked(intel_dp);
1335 		psr->sink_not_reliable = true;
1336 	}
1337 	/* clear status register */
1338 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
1339 exit:
1340 	mutex_unlock(&psr->lock);
1341 }
1342 
1343 bool intel_psr_enabled(struct intel_dp *intel_dp)
1344 {
1345 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1346 	bool ret;
1347 
1348 	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
1349 		return false;
1350 
1351 	mutex_lock(&dev_priv->psr.lock);
1352 	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
1353 	mutex_unlock(&dev_priv->psr.lock);
1354 
1355 	return ret;
1356 }
1357