xref: /openbmc/linux/drivers/gpu/drm/i915/display/intel_psr.c (revision 7d7ae873b5e0f46d19e5dc818d1a7809e4b7cc81)
1  /*
2   * Copyright © 2014 Intel Corporation
3   *
4   * Permission is hereby granted, free of charge, to any person obtaining a
5   * copy of this software and associated documentation files (the "Software"),
6   * to deal in the Software without restriction, including without limitation
7   * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8   * and/or sell copies of the Software, and to permit persons to whom the
9   * Software is furnished to do so, subject to the following conditions:
10   *
11   * The above copyright notice and this permission notice (including the next
12   * paragraph) shall be included in all copies or substantial portions of the
13   * Software.
14   *
15   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18   * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19   * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20   * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21   * DEALINGS IN THE SOFTWARE.
22   */
23  
24  #include <drm/drm_atomic_helper.h>
25  #include <drm/drm_damage_helper.h>
26  
27  #include "i915_drv.h"
28  #include "i915_reg.h"
29  #include "intel_atomic.h"
30  #include "intel_crtc.h"
31  #include "intel_de.h"
32  #include "intel_display_types.h"
33  #include "intel_dp.h"
34  #include "intel_dp_aux.h"
35  #include "intel_hdmi.h"
36  #include "intel_psr.h"
37  #include "intel_psr_regs.h"
38  #include "intel_snps_phy.h"
39  #include "skl_universal_plane.h"
40  
41  /**
42   * DOC: Panel Self Refresh (PSR/SRD)
43   *
44   * Since Haswell the display controller supports Panel Self-Refresh on
45   * display panels which have a remote frame buffer (RFB) implemented
46   * according to the PSR spec in eDP 1.3. The PSR feature allows the display
47   * to go to lower standby states when the system is idle but the display is
48   * on, as it eliminates display refresh requests to DDR memory completely
49   * as long as the frame buffer for that display is unchanged.
50   *
51   * Panel Self Refresh must be supported by both Hardware (source) and
52   * Panel (sink).
53   *
54   * PSR saves power by caching the framebuffer in the panel RFB, which allows us
55   * to power down the link and memory controller. For DSI panels the same idea
56   * is called "manual mode".
57   *
58   * The implementation uses the hardware-based PSR support which automatically
59   * enters/exits self-refresh mode. The hardware takes care of sending the
60   * required DP aux message and could even retrain the link (that part isn't
61   * enabled yet though). The hardware also keeps track of any frontbuffer
62   * changes to know when to exit self-refresh mode again. Unfortunately that
63   * part doesn't work too well, hence why the i915 PSR support uses the
64   * software frontbuffer tracking to make sure it doesn't miss a screen
65   * update. For this integration intel_psr_invalidate() and intel_psr_flush()
66   * get called by the frontbuffer tracking code. Note that because of locking
67   * issues the self-refresh re-enable code is done from a work queue, which
68   * must be correctly synchronized/cancelled when shutting down the pipe.
69   *
70   * DC3CO (DC3 clock off)
71   *
72   * On top of PSR2, GEN12 adds an intermediate power saving state that turns
73   * the clock off automatically during the PSR2 idle state.
74   * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
75   * entry/exit allows the HW to enter a low-power state even when page flipping
76   * periodically (for instance a 30fps video playback scenario).
77   *
78   * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
79   * in it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
80   * after 6 frames. If no other flip occurs and that work is executed, DC3CO
81   * is disabled and PSR2 is configured to enter deep sleep again, resetting the
82   * cycle in case of another flip.
83   * Front buffer modifications do not trigger DC3CO activation on purpose, as
84   * it would bring a lot of complexity and most modern systems will only
85   * use page flips.
86   */
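/*
 * As a rough sketch of the frontbuffer tracking integration described above
 * (illustrative only; the real call sites live in intel_frontbuffer.c), a
 * caller doing CPU rendering to the frontbuffer is expected to bracket the
 * access with the two PSR hooks:
 *
 *	intel_psr_invalidate(i915, frontbuffer_bits, ORIGIN_CPU);
 *	... CPU writes to the frontbuffer ...
 *	intel_psr_flush(i915, frontbuffer_bits, ORIGIN_CPU);
 *
 * intel_psr_invalidate() forces a PSR exit and keeps PSR disabled while the
 * bits are dirty; intel_psr_flush() clears them and re-arms PSR from the
 * work queue mentioned above.
 */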
87  
88  /*
89   * Description of PSR mask bits:
90   *
91   * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
92   *
93   *  When unmasked (nearly) all display register writes (eg. even
94   *  SWF) trigger a PSR exit. Some registers are excluded from this
95   *  and they have a more specific mask (described below). On icl+
96   *  this bit no longer exists and is effectively always set.
97   *
98   * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
99   *
100   *  When unmasked (nearly) all pipe/plane register writes
101   *  trigger a PSR exit. Some plane registers are excluded from this
102   *  and they have a more specific mask (described below).
103   *
104   * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
105   * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
106   * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
107   *
108   *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
109   *  SPR_SURF/CURBASE are not included in this and instead are
110   *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
111   *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
112   *
113   * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
114   * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
115   *
116   *  When unmasked PSR is blocked as long as the sprite
117   *  plane is enabled. skl+ with their universal planes no
118   *  longer have a mask bit like this, and no plane being
119   *  enabled blocks PSR.
120   *
121   * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
122   * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
123   *
124   *  When unmasked CURPOS writes trigger a PSR exit. On skl+
125   *  this doesn't exist but CURPOS is included in the
126   *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
127   *
128   * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
129   * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
130   *
131   *  When unmasked PSR is blocked as long as vblank and/or vsync
132   *  interrupt is unmasked in IMR *and* enabled in IER.
133   *
134   * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
135   * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
136   *
137   *  Selects whether PSR exit generates an extra vblank before
138   *  the first frame is transmitted. Also note the opposite polarity
139   *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
140   *  unmasked==do not generate the extra vblank).
141   *
142   *  With DC states enabled the extra vblank happens after link training,
143   *  with DC states disabled it happens immediately upon PSR exit trigger.
144   *  No idea as of now why there is a difference. HSW/BDW (which don't
145   *  even have DMC) always generate it after link training. Go figure.
146   *
147   *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
148   *  and thus won't latch until the first vblank. So with DC states
149   *  enabled the register effectively uses the reset value during DC5
150   *  exit+PSR exit sequence, and thus the bit does nothing until
151   *  latched by the vblank that it was trying to prevent from being
152   *  generated in the first place. So we should probably call this
153   *  one a chicken/egg bit instead on skl+.
154   *
155   *  In standby mode (as opposed to link-off) this makes no difference
156   *  as the timing generator keeps running the whole time generating
157   *  normal periodic vblanks.
158   *
159   *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
160   *  and doing so makes the behaviour match the skl+ reset value.
161   *
162   * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
163   * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
164   *
165   *  On BDW without this bit set no vblanks whatsoever are
166   *  generated after PSR exit. On HSW this has no apparent effect.
167   *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
168   *
169   * The rest of the bits are more self-explanatory and/or
170   * irrelevant for normal operation.
171   */
172  
173  static bool psr_global_enabled(struct intel_dp *intel_dp)
174  {
175  	struct intel_connector *connector = intel_dp->attached_connector;
176  	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
177  
178  	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
179  	case I915_PSR_DEBUG_DEFAULT:
180  		if (i915->params.enable_psr == -1)
181  			return connector->panel.vbt.psr.enable;
182  		return i915->params.enable_psr;
183  	case I915_PSR_DEBUG_DISABLE:
184  		return false;
185  	default:
186  		return true;
187  	}
188  }
189  
190  static bool psr2_global_enabled(struct intel_dp *intel_dp)
191  {
192  	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
193  
194  	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
195  	case I915_PSR_DEBUG_DISABLE:
196  	case I915_PSR_DEBUG_FORCE_PSR1:
197  		return false;
198  	default:
199  		if (i915->params.enable_psr == 1)
200  			return false;
201  		return true;
202  	}
203  }
204  
205  static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
206  {
207  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
208  
209  	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
210  		EDP_PSR_ERROR(intel_dp->psr.transcoder);
211  }
212  
213  static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
214  {
215  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
216  
217  	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
218  		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
219  }
220  
221  static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
222  {
223  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
224  
225  	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
226  		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
227  }
228  
229  static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
230  {
231  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
232  
233  	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
234  		EDP_PSR_MASK(intel_dp->psr.transcoder);
235  }
236  
237  static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
238  			      enum transcoder cpu_transcoder)
239  {
240  	if (DISPLAY_VER(dev_priv) >= 8)
241  		return EDP_PSR_CTL(cpu_transcoder);
242  	else
243  		return HSW_SRD_CTL;
244  }
245  
246  static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
247  				enum transcoder cpu_transcoder)
248  {
249  	if (DISPLAY_VER(dev_priv) >= 8)
250  		return EDP_PSR_DEBUG(cpu_transcoder);
251  	else
252  		return HSW_SRD_DEBUG;
253  }
254  
255  static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
256  				   enum transcoder cpu_transcoder)
257  {
258  	if (DISPLAY_VER(dev_priv) >= 8)
259  		return EDP_PSR_PERF_CNT(cpu_transcoder);
260  	else
261  		return HSW_SRD_PERF_CNT;
262  }
263  
264  static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
265  				 enum transcoder cpu_transcoder)
266  {
267  	if (DISPLAY_VER(dev_priv) >= 8)
268  		return EDP_PSR_STATUS(cpu_transcoder);
269  	else
270  		return HSW_SRD_STATUS;
271  }
272  
273  static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
274  			      enum transcoder cpu_transcoder)
275  {
276  	if (DISPLAY_VER(dev_priv) >= 12)
277  		return TRANS_PSR_IMR(cpu_transcoder);
278  	else
279  		return EDP_PSR_IMR;
280  }
281  
282  static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
283  			      enum transcoder cpu_transcoder)
284  {
285  	if (DISPLAY_VER(dev_priv) >= 12)
286  		return TRANS_PSR_IIR(cpu_transcoder);
287  	else
288  		return EDP_PSR_IIR;
289  }
290  
291  static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
292  				  enum transcoder cpu_transcoder)
293  {
294  	if (DISPLAY_VER(dev_priv) >= 8)
295  		return EDP_PSR_AUX_CTL(cpu_transcoder);
296  	else
297  		return HSW_SRD_AUX_CTL;
298  }
299  
300  static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
301  				   enum transcoder cpu_transcoder, int i)
302  {
303  	if (DISPLAY_VER(dev_priv) >= 8)
304  		return EDP_PSR_AUX_DATA(cpu_transcoder, i);
305  	else
306  		return HSW_SRD_AUX_DATA(i);
307  }
308  
309  static void psr_irq_control(struct intel_dp *intel_dp)
310  {
311  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
312  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
313  	u32 mask;
314  
315  	mask = psr_irq_psr_error_bit_get(intel_dp);
316  	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
317  		mask |= psr_irq_post_exit_bit_get(intel_dp) |
318  			psr_irq_pre_entry_bit_get(intel_dp);
319  
320  	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
321  		     psr_irq_mask_get(intel_dp), ~mask);
322  }
323  
324  static void psr_event_print(struct drm_i915_private *i915,
325  			    u32 val, bool psr2_enabled)
326  {
327  	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
328  	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
329  		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
330  	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
331  		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
332  	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
333  		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
334  	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
335  		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
336  	if (val & PSR_EVENT_GRAPHICS_RESET)
337  		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
338  	if (val & PSR_EVENT_PCH_INTERRUPT)
339  		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
340  	if (val & PSR_EVENT_MEMORY_UP)
341  		drm_dbg_kms(&i915->drm, "\tMemory up\n");
342  	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
343  		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
344  	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
345  		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
346  	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
347  		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
348  	if (val & PSR_EVENT_REGISTER_UPDATE)
349  		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
350  	if (val & PSR_EVENT_HDCP_ENABLE)
351  		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
352  	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
353  		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
354  	if (val & PSR_EVENT_VBI_ENABLE)
355  		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
356  	if (val & PSR_EVENT_LPSP_MODE_EXIT)
357  		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
358  	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
359  		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
360  }
361  
362  void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
363  {
364  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
365  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
366  	ktime_t time_ns =  ktime_get();
367  
368  	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
369  		intel_dp->psr.last_entry_attempt = time_ns;
370  		drm_dbg_kms(&dev_priv->drm,
371  			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
372  			    transcoder_name(cpu_transcoder));
373  	}
374  
375  	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
376  		intel_dp->psr.last_exit = time_ns;
377  		drm_dbg_kms(&dev_priv->drm,
378  			    "[transcoder %s] PSR exit completed\n",
379  			    transcoder_name(cpu_transcoder));
380  
381  		if (DISPLAY_VER(dev_priv) >= 9) {
382  			u32 val;
383  
384  			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
385  
386  			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
387  		}
388  	}
389  
390  	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
391  		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
392  			 transcoder_name(cpu_transcoder));
393  
394  		intel_dp->psr.irq_aux_error = true;
395  
396  		/*
397  		 * If this interrupt is not masked it will keep firing
398  		 * so fast that it prevents the scheduled work from
399  		 * running.
400  		 * Also, after a PSR error we don't want to arm PSR
401  		 * again, so we don't care about unmasking the interrupt
402  		 * or clearing irq_aux_error.
403  		 */
404  		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
405  			     0, psr_irq_psr_error_bit_get(intel_dp));
406  
407  		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
408  	}
409  }
410  
411  static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
412  {
413  	u8 alpm_caps = 0;
414  
415  	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
416  			      &alpm_caps) != 1)
417  		return false;
418  	return alpm_caps & DP_ALPM_CAP;
419  }
420  
421  static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
422  {
423  	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
424  	u8 val = 8; /* assume the worst if we can't read the value */
425  
426  	if (drm_dp_dpcd_readb(&intel_dp->aux,
427  			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
428  		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
429  	else
430  		drm_dbg_kms(&i915->drm,
431  			    "Unable to get sink synchronization latency, assuming 8 frames\n");
432  	return val;
433  }
434  
435  static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
436  {
437  	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
438  	ssize_t r;
439  	u16 w;
440  	u8 y;
441  
442  	/* If the sink doesn't have specific granularity requirements, set legacy ones */
443  	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
444  		/* As PSR2 HW sends full lines, we do not care about x granularity */
445  		w = 4;
446  		y = 4;
447  		goto exit;
448  	}
449  
450  	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
451  	if (r != 2)
452  		drm_dbg_kms(&i915->drm,
453  			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
454  	/*
455  	 * Spec says that if the value read is 0 the default granularity should
456  	 * be used instead.
457  	 */
458  	if (r != 2 || w == 0)
459  		w = 4;
460  
461  	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
462  	if (r != 1) {
463  		drm_dbg_kms(&i915->drm,
464  			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
465  		y = 4;
466  	}
467  	if (y == 0)
468  		y = 1;
469  
470  exit:
471  	intel_dp->psr.su_w_granularity = w;
472  	intel_dp->psr.su_y_granularity = y;
473  }
474  
475  void intel_psr_init_dpcd(struct intel_dp *intel_dp)
476  {
477  	struct drm_i915_private *dev_priv =
478  		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
479  
480  	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
481  			 sizeof(intel_dp->psr_dpcd));
482  
483  	if (!intel_dp->psr_dpcd[0])
484  		return;
485  	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
486  		    intel_dp->psr_dpcd[0]);
487  
488  	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
489  		drm_dbg_kms(&dev_priv->drm,
490  			    "PSR support not currently available for this panel\n");
491  		return;
492  	}
493  
494  	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
495  		drm_dbg_kms(&dev_priv->drm,
496  			    "Panel lacks power state control, PSR cannot be enabled\n");
497  		return;
498  	}
499  
500  	intel_dp->psr.sink_support = true;
501  	intel_dp->psr.sink_sync_latency =
502  		intel_dp_get_sink_sync_latency(intel_dp);
503  
504  	if (DISPLAY_VER(dev_priv) >= 9 &&
505  	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
506  		bool y_req = intel_dp->psr_dpcd[1] &
507  			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
508  		bool alpm = intel_dp_get_alpm_status(intel_dp);
509  
510  		/*
511  		 * All panels that support PSR version 03h (PSR2 +
512  		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
513  		 * are only sure that it is going to be used when required by
514  		 * the panel. This way the panel is capable of doing selective
515  		 * updates without an AUX frame sync.
516  		 *
517  		 * To support PSR version 02h and PSR version 03h panels
518  		 * without the Y-coordinate requirement we would need to
519  		 * enable GTC first.
520  		 */
521  		intel_dp->psr.sink_psr2_support = y_req && alpm;
522  		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
523  			    intel_dp->psr.sink_psr2_support ? "" : "not ");
524  
525  		if (intel_dp->psr.sink_psr2_support) {
526  			intel_dp->psr.colorimetry_support =
527  				intel_dp_get_colorimetry_status(intel_dp);
528  			intel_dp_get_su_granularity(intel_dp);
529  		}
530  	}
531  }
532  
533  static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
534  {
535  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
536  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
537  	u32 aux_clock_divider, aux_ctl;
538  	/* write DP_SET_POWER=D0 */
539  	static const u8 aux_msg[] = {
540  		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
541  		[1] = (DP_SET_POWER >> 8) & 0xff,
542  		[2] = DP_SET_POWER & 0xff,
543  		[3] = 1 - 1,
544  		[4] = DP_SET_POWER_D0,
545  	};
546  	int i;
547  
548  	BUILD_BUG_ON(sizeof(aux_msg) > 20);
549  	for (i = 0; i < sizeof(aux_msg); i += 4)
550  		intel_de_write(dev_priv,
551  			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
552  			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
553  
554  	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
555  
556  	/* Start with bits set for DDI_AUX_CTL register */
557  	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
558  					     aux_clock_divider);
559  
560  	/* Select only valid bits for SRD_AUX_CTL */
561  	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
562  		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
563  		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
564  		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
565  
566  	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
567  		       aux_ctl);
568  }
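/*
 * To illustrate what the loop above programs (assuming the usual MSB-first
 * packing done by intel_dp_aux_pack()): the 5 byte aux_msg is a native AUX
 * write of DP_SET_POWER (0x600) with a DP_SET_POWER_D0 payload, so the two
 * AUX data registers end up holding roughly
 *
 *	AUX_DATA[0] = 0x80060000	(aux_msg[0..3])
 *	AUX_DATA[1] = 0x01000000	(aux_msg[4])
 *
 * which lets the HW replay the D0 write on its own when exiting self-refresh.
 */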
569  
570  static void intel_psr_enable_sink(struct intel_dp *intel_dp)
571  {
572  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
573  	u8 dpcd_val = DP_PSR_ENABLE;
574  
575  	/* Enable ALPM at sink for psr2 */
576  	if (intel_dp->psr.psr2_enabled) {
577  		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
578  				   DP_ALPM_ENABLE |
579  				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
580  
581  		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
582  	} else {
583  		if (intel_dp->psr.link_standby)
584  			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
585  
586  		if (DISPLAY_VER(dev_priv) >= 8)
587  			dpcd_val |= DP_PSR_CRC_VERIFICATION;
588  	}
589  
590  	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
591  		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
592  
593  	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
594  
595  	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
596  }
597  
598  static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
599  {
600  	struct intel_connector *connector = intel_dp->attached_connector;
601  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
602  	u32 val = 0;
603  
604  	if (DISPLAY_VER(dev_priv) >= 11)
605  		val |= EDP_PSR_TP4_TIME_0us;
606  
607  	if (dev_priv->params.psr_safest_params) {
608  		val |= EDP_PSR_TP1_TIME_2500us;
609  		val |= EDP_PSR_TP2_TP3_TIME_2500us;
610  		goto check_tp3_sel;
611  	}
612  
613  	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
614  		val |= EDP_PSR_TP1_TIME_0us;
615  	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
616  		val |= EDP_PSR_TP1_TIME_100us;
617  	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
618  		val |= EDP_PSR_TP1_TIME_500us;
619  	else
620  		val |= EDP_PSR_TP1_TIME_2500us;
621  
622  	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
623  		val |= EDP_PSR_TP2_TP3_TIME_0us;
624  	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
625  		val |= EDP_PSR_TP2_TP3_TIME_100us;
626  	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
627  		val |= EDP_PSR_TP2_TP3_TIME_500us;
628  	else
629  		val |= EDP_PSR_TP2_TP3_TIME_2500us;
630  
631  	/*
632  	 * WA 0479: hsw,bdw
633  	 * "Do not skip both TP1 and TP2/TP3"
634  	 */
635  	if (DISPLAY_VER(dev_priv) < 9 &&
636  	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
637  	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
638  		val |= EDP_PSR_TP2_TP3_TIME_100us;
639  
640  check_tp3_sel:
641  	if (intel_dp_source_supports_tps3(dev_priv) &&
642  	    drm_dp_tps3_supported(intel_dp->dpcd))
643  		val |= EDP_PSR_TP_TP1_TP3;
644  	else
645  		val |= EDP_PSR_TP_TP1_TP2;
646  
647  	return val;
648  }
649  
650  static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
651  {
652  	struct intel_connector *connector = intel_dp->attached_connector;
653  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
654  	int idle_frames;
655  
656  	/* Let's use 6 as the minimum to cover all known cases including the
657  	 * off-by-one issue that HW has in some cases.
658  	 */
659  	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
660  	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
661  
662  	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
663  		idle_frames = 0xf;
664  
665  	return idle_frames;
666  }
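/*
 * Worked example for the clamping above (hypothetical numbers): a VBT
 * idle_frames of 2 and a sink_sync_latency of 4 give max(6, 2) = 6 and then
 * max(6, 4 + 1) = 6, so PSR waits 6 idle frames before entering; the 0xf
 * clamp only matters for bogus VBT or sink values.
 */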
667  
668  static void hsw_activate_psr1(struct intel_dp *intel_dp)
669  {
670  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
671  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
672  	u32 max_sleep_time = 0x1f;
673  	u32 val = EDP_PSR_ENABLE;
674  
675  	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
676  
677  	if (DISPLAY_VER(dev_priv) < 20)
678  		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
679  
680  	if (IS_HASWELL(dev_priv))
681  		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
682  
683  	if (intel_dp->psr.link_standby)
684  		val |= EDP_PSR_LINK_STANDBY;
685  
686  	val |= intel_psr1_get_tp_time(intel_dp);
687  
688  	if (DISPLAY_VER(dev_priv) >= 8)
689  		val |= EDP_PSR_CRC_ENABLE;
690  
691  	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
692  		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
693  }
694  
695  static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
696  {
697  	struct intel_connector *connector = intel_dp->attached_connector;
698  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
699  	u32 val = 0;
700  
701  	if (dev_priv->params.psr_safest_params)
702  		return EDP_PSR2_TP2_TIME_2500us;
703  
704  	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
705  	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
706  		val |= EDP_PSR2_TP2_TIME_50us;
707  	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
708  		val |= EDP_PSR2_TP2_TIME_100us;
709  	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
710  		val |= EDP_PSR2_TP2_TIME_500us;
711  	else
712  		val |= EDP_PSR2_TP2_TIME_2500us;
713  
714  	return val;
715  }
716  
717  static int psr2_block_count_lines(struct intel_dp *intel_dp)
718  {
719  	return intel_dp->psr.io_wake_lines < 9 &&
720  		intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
721  }
722  
723  static int psr2_block_count(struct intel_dp *intel_dp)
724  {
725  	return psr2_block_count_lines(intel_dp) / 4;
726  }
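/*
 * E.g. with io_wake_lines = 7 and fast_wake_lines = 7 the block count is
 * 8 lines, i.e. 2 blocks (TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 in
 * hsw_activate_psr2() below); once either wake value reaches 9 lines it
 * becomes 12 lines, i.e. 3 blocks.
 */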
727  
728  static void hsw_activate_psr2(struct intel_dp *intel_dp)
729  {
730  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
731  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
732  	u32 val = EDP_PSR2_ENABLE;
733  
734  	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
735  
736  	if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
737  		val |= EDP_SU_TRACK_ENABLE;
738  
739  	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
740  		val |= EDP_Y_COORDINATE_ENABLE;
741  
742  	val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
743  	val |= intel_psr2_get_tp_time(intel_dp);
744  
745  	if (DISPLAY_VER(dev_priv) >= 12) {
746  		if (psr2_block_count(intel_dp) > 2)
747  			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
748  		else
749  			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
750  	}
751  
752  	/* Wa_22012278275:adl-p */
753  	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
754  		static const u8 map[] = {
755  			2, /* 5 lines */
756  			1, /* 6 lines */
757  			0, /* 7 lines */
758  			3, /* 8 lines */
759  			6, /* 9 lines */
760  			5, /* 10 lines */
761  			4, /* 11 lines */
762  			7, /* 12 lines */
763  		};
764  		/*
765  		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
766  		 * the comments below for more information.
767  		 */
768  		int tmp;
769  
770  		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
771  		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
772  
773  		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
774  		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
775  	} else if (DISPLAY_VER(dev_priv) >= 12) {
776  		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
777  		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
778  	} else if (DISPLAY_VER(dev_priv) >= 9) {
779  		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
780  		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
781  	}
782  
783  	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
784  		val |= EDP_PSR2_SU_SDP_SCANLINE;
785  
786  	if (intel_dp->psr.psr2_sel_fetch_enabled) {
787  		u32 tmp;
788  
789  		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
790  		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
791  	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
792  		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
793  	}
794  
795  	/*
796  	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and Bspec
797  	 * recommends keeping this bit unset while PSR2 is enabled.
798  	 */
799  	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
800  
801  	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
802  }
803  
804  static bool
805  transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
806  {
807  	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
808  		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
809  	else if (DISPLAY_VER(dev_priv) >= 12)
810  		return cpu_transcoder == TRANSCODER_A;
811  	else if (DISPLAY_VER(dev_priv) >= 9)
812  		return cpu_transcoder == TRANSCODER_EDP;
813  	else
814  		return false;
815  }
816  
817  static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
818  {
819  	if (!cstate || !cstate->hw.active)
820  		return 0;
821  
822  	return DIV_ROUND_UP(1000 * 1000,
823  			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
824  }
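/*
 * E.g. a 60Hz mode yields DIV_ROUND_UP(1000 * 1000, 60) = 16667us per frame,
 * so the "DC5/DC6 requires at least 6 idle frames" rule applied to
 * dc3co_exit_delay in intel_psr_enable_locked() works out to roughly 100ms.
 */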
825  
826  static void psr2_program_idle_frames(struct intel_dp *intel_dp,
827  				     u32 idle_frames)
828  {
829  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
830  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
831  
832  	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
833  		     EDP_PSR2_IDLE_FRAMES_MASK,
834  		     EDP_PSR2_IDLE_FRAMES(idle_frames));
835  }
836  
837  static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
838  {
839  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
840  
841  	psr2_program_idle_frames(intel_dp, 0);
842  	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
843  }
844  
845  static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
846  {
847  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
848  
849  	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
850  	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
851  }
852  
853  static void tgl_dc3co_disable_work(struct work_struct *work)
854  {
855  	struct intel_dp *intel_dp =
856  		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
857  
858  	mutex_lock(&intel_dp->psr.lock);
859  	/* If delayed work is pending, it is not idle */
860  	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
861  		goto unlock;
862  
863  	tgl_psr2_disable_dc3co(intel_dp);
864  unlock:
865  	mutex_unlock(&intel_dp->psr.lock);
866  }
867  
868  static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
869  {
870  	if (!intel_dp->psr.dc3co_exitline)
871  		return;
872  
873  	cancel_delayed_work(&intel_dp->psr.dc3co_work);
874  	/* Before PSR2 exit, disallow DC3CO */
875  	tgl_psr2_disable_dc3co(intel_dp);
876  }
877  
878  static bool
879  dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
880  			      struct intel_crtc_state *crtc_state)
881  {
882  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
883  	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
884  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
885  	enum port port = dig_port->base.port;
886  
887  	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
888  		return pipe <= PIPE_B && port <= PORT_B;
889  	else
890  		return pipe == PIPE_A && port == PORT_A;
891  }
892  
893  static void
894  tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
895  				  struct intel_crtc_state *crtc_state)
896  {
897  	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
898  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
899  	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
900  	u32 exit_scanlines;
901  
902  	/*
903  	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
904  	 * disable DC3CO until the changed dc3co activating/deactivating sequence
905  	 * is applied. B.Specs:49196
906  	 */
907  	return;
908  
909  	/*
910  	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
911  	 * TODO: when the issue is addressed, this restriction should be removed.
912  	 */
913  	if (crtc_state->enable_psr2_sel_fetch)
914  		return;
915  
916  	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
917  		return;
918  
919  	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
920  		return;
921  
922  	/* Wa_16011303918:adl-p */
923  	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
924  		return;
925  
926  	/*
927  	 * DC3CO Exit time 200us B.Spec 49196
928  	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
929  	 */
930  	exit_scanlines =
931  		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
932  
933  	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
934  		return;
935  
936  	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
937  }
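/*
 * Worked example for the exitline math above (currently short-circuited by
 * the FIXME; hypothetical 1920x1080@60 timings with a ~14.8us line time):
 * intel_usecs_to_scanlines(..., 200) rounds up to 14 lines, +1 gives 15, so
 * dc3co_exitline would be 1080 - 15 = 1065, i.e. the DC3CO exit is kicked
 * off 15 scanlines before the end of the active area.
 */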
938  
939  static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
940  					      struct intel_crtc_state *crtc_state)
941  {
942  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
943  
944  	if (!dev_priv->params.enable_psr2_sel_fetch &&
945  	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
946  		drm_dbg_kms(&dev_priv->drm,
947  			    "PSR2 sel fetch not enabled, disabled by parameter\n");
948  		return false;
949  	}
950  
951  	if (crtc_state->uapi.async_flip) {
952  		drm_dbg_kms(&dev_priv->drm,
953  			    "PSR2 sel fetch not enabled, async flip enabled\n");
954  		return false;
955  	}
956  
957  	return crtc_state->enable_psr2_sel_fetch = true;
958  }
959  
960  static bool psr2_granularity_check(struct intel_dp *intel_dp,
961  				   struct intel_crtc_state *crtc_state)
962  {
963  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
964  	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
965  	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
966  	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
967  	u16 y_granularity = 0;
968  
969  	/* PSR2 HW only sends full lines, so we only need to validate the width */
970  	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
971  		return false;
972  
973  	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
974  		return false;
975  
976  	/* HW tracking is only aligned to 4 lines */
977  	if (!crtc_state->enable_psr2_sel_fetch)
978  		return intel_dp->psr.su_y_granularity == 4;
979  
980  	/*
981  	 * adl_p and mtl platforms have 1 line granularity.
982  	 * For other platforms with SW tracking we can adjust the y coordinates
983  	 * to match the sink requirement if it is a multiple of 4.
984  	 */
985  	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
986  		y_granularity = intel_dp->psr.su_y_granularity;
987  	else if (intel_dp->psr.su_y_granularity <= 2)
988  		y_granularity = 4;
989  	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
990  		y_granularity = intel_dp->psr.su_y_granularity;
991  
992  	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
993  		return false;
994  
995  	if (crtc_state->dsc.compression_enable &&
996  	    vdsc_cfg->slice_height % y_granularity)
997  		return false;
998  
999  	crtc_state->su_y_granularity = y_granularity;
1000  	return true;
1001  }
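/*
 * Worked example (hypothetical non-adl_p/mtl panel with selective fetch):
 * su_w_granularity = 4 and su_y_granularity = 2 on a 1920x1080 mode pass the
 * width/height modulo checks above, and y_granularity is rounded up to 4
 * (1080 % 4 == 0 as well), so crtc_state->su_y_granularity ends up as 4.
 */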
1002  
1003  static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
1004  							struct intel_crtc_state *crtc_state)
1005  {
1006  	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
1007  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1008  	u32 hblank_total, hblank_ns, req_ns;
1009  
1010  	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
1011  	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
1012  
1013  	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
1014  	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
1015  
1016  	if ((hblank_ns - req_ns) > 100)
1017  		return true;
1018  
1019  	/* Not supported <13 / Wa_22012279113:adl-p */
1020  	if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
1021  		return false;
1022  
1023  	crtc_state->req_psr2_sdp_prior_scanline = true;
1024  	return true;
1025  }
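/*
 * Worked example for the check above (hypothetical 4-lane HBR link,
 * port_clock = 270000, i.e. a 270MHz symbol clock): req_ns =
 * ((60 / 4) + 11) * 1000 / 270 = 96ns. With 160 hblank pixels at a 300MHz
 * dotclock, hblank_ns is ~533ns, so the SDP fits with margin and no "SDP
 * prior scanline" indication is needed.
 */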
1026  
1027  static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
1028  				     struct intel_crtc_state *crtc_state)
1029  {
1030  	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1031  	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
1032  	u8 max_wake_lines;
1033  
1034  	if (DISPLAY_VER(i915) >= 12) {
1035  		io_wake_time = 42;
1036  		/*
1037  		 * According to Bspec it's 42us, but based on testing
1038  		 * it is not enough -> use 45 us.
1039  		 */
1040  		fast_wake_time = 45;
1041  		max_wake_lines = 12;
1042  	} else {
1043  		io_wake_time = 50;
1044  		fast_wake_time = 32;
1045  		max_wake_lines = 8;
1046  	}
1047  
1048  	io_wake_lines = intel_usecs_to_scanlines(
1049  		&crtc_state->hw.adjusted_mode, io_wake_time);
1050  	fast_wake_lines = intel_usecs_to_scanlines(
1051  		&crtc_state->hw.adjusted_mode, fast_wake_time);
1052  
1053  	if (io_wake_lines > max_wake_lines ||
1054  	    fast_wake_lines > max_wake_lines)
1055  		return false;
1056  
1057  	if (i915->params.psr_safest_params)
1058  		io_wake_lines = fast_wake_lines = max_wake_lines;
1059  
1060  	/* According to Bspec the lower limit should be set to 7 lines. */
1061  	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
1062  	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
1063  
1064  	return true;
1065  }
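/*
 * Worked example (hypothetical display 12+ eDP panel, 1920x1080@60 with
 * htotal 2200 and a 148500kHz dotclock): 42us and 45us convert to 3 and 4
 * scanlines, well under the 12 line limit, and both are then raised to the
 * Bspec lower limit of 7 lines for io_wake_lines/fast_wake_lines.
 */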
1066  
1067  static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1068  				    struct intel_crtc_state *crtc_state)
1069  {
1070  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1071  	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1072  	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1073  	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1074  
1075  	if (!intel_dp->psr.sink_psr2_support)
1076  		return false;
1077  
1078  	/* JSL and EHL only support eDP 1.3 */
1079  	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
1080  		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
1081  		return false;
1082  	}
1083  
1084  	/* Wa_16011181250 */
1085  	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
1086  	    IS_DG2(dev_priv)) {
1087  		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
1088  		return false;
1089  	}
1090  
1091  	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1092  		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
1093  		return false;
1094  	}
1095  
1096  	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1097  		drm_dbg_kms(&dev_priv->drm,
1098  			    "PSR2 not supported in transcoder %s\n",
1099  			    transcoder_name(crtc_state->cpu_transcoder));
1100  		return false;
1101  	}
1102  
1103  	if (!psr2_global_enabled(intel_dp)) {
1104  		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
1105  		return false;
1106  	}
1107  
1108  	/*
1109  	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
1110  	 * resolution requires DSC to be enabled, priority is given to DSC
1111  	 * over PSR2.
1112  	 */
1113  	if (crtc_state->dsc.compression_enable &&
1114  	    (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
1115  		drm_dbg_kms(&dev_priv->drm,
1116  			    "PSR2 cannot be enabled since DSC is enabled\n");
1117  		return false;
1118  	}
1119  
1120  	if (crtc_state->crc_enabled) {
1121  		drm_dbg_kms(&dev_priv->drm,
1122  			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1123  		return false;
1124  	}
1125  
1126  	if (DISPLAY_VER(dev_priv) >= 12) {
1127  		psr_max_h = 5120;
1128  		psr_max_v = 3200;
1129  		max_bpp = 30;
1130  	} else if (DISPLAY_VER(dev_priv) >= 10) {
1131  		psr_max_h = 4096;
1132  		psr_max_v = 2304;
1133  		max_bpp = 24;
1134  	} else if (DISPLAY_VER(dev_priv) == 9) {
1135  		psr_max_h = 3640;
1136  		psr_max_v = 2304;
1137  		max_bpp = 24;
1138  	}
1139  
1140  	if (crtc_state->pipe_bpp > max_bpp) {
1141  		drm_dbg_kms(&dev_priv->drm,
1142  			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1143  			    crtc_state->pipe_bpp, max_bpp);
1144  		return false;
1145  	}
1146  
1147  	/* Wa_16011303918:adl-p */
1148  	if (crtc_state->vrr.enable &&
1149  	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1150  		drm_dbg_kms(&dev_priv->drm,
1151  			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1152  		return false;
1153  	}
1154  
1155  	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1156  		drm_dbg_kms(&dev_priv->drm,
1157  			    "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
1158  		return false;
1159  	}
1160  
1161  	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
1162  		drm_dbg_kms(&dev_priv->drm,
1163  			    "PSR2 not enabled, Unable to use long enough wake times\n");
1164  		return false;
1165  	}
1166  
1167  	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
1168  	if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1169  	    crtc_state->hw.adjusted_mode.crtc_vblank_start <
1170  	    psr2_block_count_lines(intel_dp)) {
1171  		drm_dbg_kms(&dev_priv->drm,
1172  			    "PSR2 not enabled, too short vblank time\n");
1173  		return false;
1174  	}
1175  
1176  	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1177  		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1178  		    !HAS_PSR_HW_TRACKING(dev_priv)) {
1179  			drm_dbg_kms(&dev_priv->drm,
1180  				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1181  			return false;
1182  		}
1183  	}
1184  
1185  	if (!psr2_granularity_check(intel_dp, crtc_state)) {
1186  		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1187  		goto unsupported;
1188  	}
1189  
1190  	if (!crtc_state->enable_psr2_sel_fetch &&
1191  	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1192  		drm_dbg_kms(&dev_priv->drm,
1193  			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1194  			    crtc_hdisplay, crtc_vdisplay,
1195  			    psr_max_h, psr_max_v);
1196  		goto unsupported;
1197  	}
1198  
1199  	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1200  	return true;
1201  
1202  unsupported:
1203  	crtc_state->enable_psr2_sel_fetch = false;
1204  	return false;
1205  }
1206  
1207  void intel_psr_compute_config(struct intel_dp *intel_dp,
1208  			      struct intel_crtc_state *crtc_state,
1209  			      struct drm_connector_state *conn_state)
1210  {
1211  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1212  	const struct drm_display_mode *adjusted_mode =
1213  		&crtc_state->hw.adjusted_mode;
1214  	int psr_setup_time;
1215  
1216  	/*
1217  	 * Current PSR panels don't work reliably with VRR enabled.
1218  	 * So if VRR is enabled, do not enable PSR.
1219  	 */
1220  	if (crtc_state->vrr.enable)
1221  		return;
1222  
1223  	if (!CAN_PSR(intel_dp))
1224  		return;
1225  
1226  	if (!psr_global_enabled(intel_dp)) {
1227  		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1228  		return;
1229  	}
1230  
1231  	if (intel_dp->psr.sink_not_reliable) {
1232  		drm_dbg_kms(&dev_priv->drm,
1233  			    "PSR sink implementation is not reliable\n");
1234  		return;
1235  	}
1236  
1237  	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1238  		drm_dbg_kms(&dev_priv->drm,
1239  			    "PSR condition failed: Interlaced mode enabled\n");
1240  		return;
1241  	}
1242  
1243  	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1244  	if (psr_setup_time < 0) {
1245  		drm_dbg_kms(&dev_priv->drm,
1246  			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1247  			    intel_dp->psr_dpcd[1]);
1248  		return;
1249  	}
1250  
1251  	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1252  	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1253  		drm_dbg_kms(&dev_priv->drm,
1254  			    "PSR condition failed: PSR setup time (%d us) too long\n",
1255  			    psr_setup_time);
1256  		return;
1257  	}
1258  
1259  	crtc_state->has_psr = true;
1260  	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1261  
1262  	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1263  	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1264  				     &crtc_state->psr_vsc);
1265  }
1266  
1267  void intel_psr_get_config(struct intel_encoder *encoder,
1268  			  struct intel_crtc_state *pipe_config)
1269  {
1270  	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1271  	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1272  	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
1273  	struct intel_dp *intel_dp;
1274  	u32 val;
1275  
1276  	if (!dig_port)
1277  		return;
1278  
1279  	intel_dp = &dig_port->dp;
1280  	if (!CAN_PSR(intel_dp))
1281  		return;
1282  
1283  	mutex_lock(&intel_dp->psr.lock);
1284  	if (!intel_dp->psr.enabled)
1285  		goto unlock;
1286  
1287  	/*
1288  	 * Not possible to read back EDP_PSR/PSR2_CTL reliably, as PSR is
1289  	 * dynamically enabled/disabled by frontbuffer tracking and others.
1290  	 */
1291  	pipe_config->has_psr = true;
1292  	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1293  	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1294  
1295  	if (!intel_dp->psr.psr2_enabled)
1296  		goto unlock;
1297  
1298  	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1299  		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
1300  		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1301  			pipe_config->enable_psr2_sel_fetch = true;
1302  	}
1303  
1304  	if (DISPLAY_VER(dev_priv) >= 12) {
1305  		val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1306  		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
1307  	}
1308  unlock:
1309  	mutex_unlock(&intel_dp->psr.lock);
1310  }
1311  
1312  static void intel_psr_activate(struct intel_dp *intel_dp)
1313  {
1314  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1315  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1316  
1317  	drm_WARN_ON(&dev_priv->drm,
1318  		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1319  		    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1320  
1321  	drm_WARN_ON(&dev_priv->drm,
1322  		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1323  
1324  	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1325  
1326  	lockdep_assert_held(&intel_dp->psr.lock);
1327  
1328  	/* psr1 and psr2 are mutually exclusive. */
1329  	if (intel_dp->psr.psr2_enabled)
1330  		hsw_activate_psr2(intel_dp);
1331  	else
1332  		hsw_activate_psr1(intel_dp);
1333  
1334  	intel_dp->psr.active = true;
1335  }
1336  
1337  static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1338  {
1339  	switch (intel_dp->psr.pipe) {
1340  	case PIPE_A:
1341  		return LATENCY_REPORTING_REMOVED_PIPE_A;
1342  	case PIPE_B:
1343  		return LATENCY_REPORTING_REMOVED_PIPE_B;
1344  	case PIPE_C:
1345  		return LATENCY_REPORTING_REMOVED_PIPE_C;
1346  	case PIPE_D:
1347  		return LATENCY_REPORTING_REMOVED_PIPE_D;
1348  	default:
1349  		MISSING_CASE(intel_dp->psr.pipe);
1350  		return 0;
1351  	}
1352  }
1353  
1354  /*
1355   * Wa_16013835468
1356   * Wa_14015648006
1357   */
1358  static void wm_optimization_wa(struct intel_dp *intel_dp,
1359  			       const struct intel_crtc_state *crtc_state)
1360  {
1361  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1362  	bool set_wa_bit = false;
1363  
1364  	/* Wa_14015648006 */
1365  	if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1366  	    IS_DISPLAY_VER(dev_priv, 11, 13))
1367  		set_wa_bit |= crtc_state->wm_level_disabled;
1368  
1369  	/* Wa_16013835468 */
1370  	if (DISPLAY_VER(dev_priv) == 12)
1371  		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1372  			crtc_state->hw.adjusted_mode.crtc_vdisplay;
1373  
1374  	if (set_wa_bit)
1375  		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1376  			     0, wa_16013835468_bit_get(intel_dp));
1377  	else
1378  		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1379  			     wa_16013835468_bit_get(intel_dp), 0);
1380  }
1381  
1382  static void intel_psr_enable_source(struct intel_dp *intel_dp,
1383  				    const struct intel_crtc_state *crtc_state)
1384  {
1385  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1386  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1387  	u32 mask;
1388  
1389  	/*
1390  	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1391  	 * SKL+ use hardcoded values for PSR AUX transactions.
1392  	 */
1393  	if (DISPLAY_VER(dev_priv) < 9)
1394  		hsw_psr_setup_aux(intel_dp);
1395  
1396  	/*
1397  	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD.
1398  	 * Also mask LPSP to avoid a dependency on other drivers that might
1399  	 * block runtime_pm, besides preventing other HW tracking issues,
1400  	 * now that we can rely on frontbuffer tracking.
1401  	 */
1402  	mask = EDP_PSR_DEBUG_MASK_MEMUP |
1403  	       EDP_PSR_DEBUG_MASK_HPD;
1404  
1405  	/*
1406  	 * For some unknown reason on HSW non-ULT (or at least on
1407  	 * Dell Latitude E6540) external displays start to flicker
1408  	 * when PSR is enabled on the eDP. SR/PC6 residency is much
1409  	 * higher than should be possible with an external display.
1410  	 * As a workaround leave LPSP unmasked to prevent PSR entry
1411  	 * when external displays are active.
1412  	 */
1413  	if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1414  		mask |= EDP_PSR_DEBUG_MASK_LPSP;
1415  
1416  	if (DISPLAY_VER(dev_priv) < 20)
1417  		mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1418  
1419  	/*
1420  	 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1421  	 * registers in order to keep the CURSURFLIVE tricks working :(
1422  	 */
1423  	if (IS_DISPLAY_VER(dev_priv, 9, 10))
1424  		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1425  
1426  	/* allow PSR with sprite enabled */
1427  	if (IS_HASWELL(dev_priv))
1428  		mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1429  
1430  	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1431  
1432  	psr_irq_control(intel_dp);
1433  
1434  	/*
1435  	 * TODO: if future platforms support DC3CO in more than one
1436  	 * transcoder, EXITLINE will need to be unset when disabling PSR
1437  	 */
1438  	if (intel_dp->psr.dc3co_exitline)
1439  		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1440  			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1441  
1442  	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1443  		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1444  			     intel_dp->psr.psr2_sel_fetch_enabled ?
1445  			     IGNORE_PSR2_HW_TRACKING : 0);
1446  
1447  	/*
1448  	 * Wa_16013835468
1449  	 * Wa_14015648006
1450  	 */
1451  	wm_optimization_wa(intel_dp, crtc_state);
1452  
1453  	if (intel_dp->psr.psr2_enabled) {
1454  		if (DISPLAY_VER(dev_priv) == 9)
1455  			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1456  				     PSR2_VSC_ENABLE_PROG_HEADER |
1457  				     PSR2_ADD_VERTICAL_LINE_COUNT);
1458  
1459  		/*
1460  		 * Wa_16014451276:adlp,mtl[a0,b0]
1461  		 * All supported adlp panels have 1-based X granularity, this may
1462  		 * cause issues if non-supported panels are used.
1463  		 */
1464  		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1465  			intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
1466  				     ADLP_1_BASED_X_GRANULARITY);
1467  		else if (IS_ALDERLAKE_P(dev_priv))
1468  			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1469  				     ADLP_1_BASED_X_GRANULARITY);
1470  
1471  		/* Wa_16012604467:adlp,mtl[a0,b0] */
1472  		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1473  			intel_de_rmw(dev_priv,
1474  				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1475  				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1476  		else if (IS_ALDERLAKE_P(dev_priv))
1477  			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1478  				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1479  	}
1480  }
1481  
1482  static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1483  {
1484  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1485  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1486  	u32 val;
1487  
1488  	/*
1489  	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1490  	 * will still keep the error set even after the reset done in the
1491  	 * irq_preinstall and irq_uninstall hooks.
1492  	 * Enabling PSR in this situation causes the screen to freeze the
1493  	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1494  	 * to avoid any rendering problems.
1495  	 */
1496  	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1497  	val &= psr_irq_psr_error_bit_get(intel_dp);
1498  	if (val) {
1499  		intel_dp->psr.sink_not_reliable = true;
1500  		drm_dbg_kms(&dev_priv->drm,
1501  			    "PSR interruption error set, not enabling PSR\n");
1502  		return false;
1503  	}
1504  
1505  	return true;
1506  }
1507  
1508  static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1509  				    const struct intel_crtc_state *crtc_state)
1510  {
1511  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1512  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1513  	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1514  	struct intel_encoder *encoder = &dig_port->base;
1515  	u32 val;
1516  
1517  	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1518  
1519  	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1520  	intel_dp->psr.busy_frontbuffer_bits = 0;
1521  	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1522  	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1523  	/* DC5/DC6 requires at least 6 idle frames */
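	/* (illustrative: at a 60 Hz refresh rate this is roughly 100 ms) */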
1524  	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1525  	intel_dp->psr.dc3co_exit_delay = val;
1526  	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1527  	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1528  	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1529  	intel_dp->psr.req_psr2_sdp_prior_scanline =
1530  		crtc_state->req_psr2_sdp_prior_scanline;
1531  
1532  	if (!psr_interrupt_error_check(intel_dp))
1533  		return;
1534  
1535  	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1536  		    intel_dp->psr.psr2_enabled ? "2" : "1");
1537  	intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1538  	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1539  	intel_psr_enable_sink(intel_dp);
1540  	intel_psr_enable_source(intel_dp, crtc_state);
1541  	intel_dp->psr.enabled = true;
1542  	intel_dp->psr.paused = false;
1543  
1544  	intel_psr_activate(intel_dp);
1545  }
1546  
1547  static void intel_psr_exit(struct intel_dp *intel_dp)
1548  {
1549  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1550  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1551  	u32 val;
1552  
1553  	if (!intel_dp->psr.active) {
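		/*
		 * Software already considers PSR inactive here; just sanity
		 * check that the hardware enable bits agree before bailing out.
		 */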
1554  		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1555  			val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1556  			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1557  		}
1558  
1559  		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1560  		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1561  
1562  		return;
1563  	}
1564  
1565  	if (intel_dp->psr.psr2_enabled) {
1566  		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1567  
1568  		val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1569  				   EDP_PSR2_ENABLE, 0);
1570  
1571  		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1572  	} else {
1573  		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1574  				   EDP_PSR_ENABLE, 0);
1575  
1576  		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1577  	}
1578  	intel_dp->psr.active = false;
1579  }
1580  
1581  static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1582  {
1583  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1584  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1585  	i915_reg_t psr_status;
1586  	u32 psr_status_mask;
1587  
1588  	if (intel_dp->psr.psr2_enabled) {
1589  		psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1590  		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1591  	} else {
1592  		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1593  		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1594  	}
1595  
1596  	/* Wait till PSR is idle */
1597  	if (intel_de_wait_for_clear(dev_priv, psr_status,
1598  				    psr_status_mask, 2000))
1599  		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1600  }
1601  
1602  static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1603  {
1604  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1605  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1606  	enum phy phy = intel_port_to_phy(dev_priv,
1607  					 dp_to_dig_port(intel_dp)->base.port);
1608  
1609  	lockdep_assert_held(&intel_dp->psr.lock);
1610  
1611  	if (!intel_dp->psr.enabled)
1612  		return;
1613  
1614  	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1615  		    intel_dp->psr.psr2_enabled ? "2" : "1");
1616  
1617  	intel_psr_exit(intel_dp);
1618  	intel_psr_wait_exit_locked(intel_dp);
1619  
1620  	/*
1621  	 * Wa_16013835468
1622  	 * Wa_14015648006
1623  	 */
1624  	if (DISPLAY_VER(dev_priv) >= 11)
1625  		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1626  			     wa_16013835468_bit_get(intel_dp), 0);
1627  
1628  	if (intel_dp->psr.psr2_enabled) {
1629  		/* Wa_16012604467:adlp,mtl[a0,b0] */
1630  		if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1631  			intel_de_rmw(dev_priv,
1632  				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1633  				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1634  		else if (IS_ALDERLAKE_P(dev_priv))
1635  			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1636  				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1637  	}
1638  
1639  	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1640  
1641  	/* Disable PSR on Sink */
1642  	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1643  
1644  	if (intel_dp->psr.psr2_enabled)
1645  		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1646  
1647  	intel_dp->psr.enabled = false;
1648  	intel_dp->psr.psr2_enabled = false;
1649  	intel_dp->psr.psr2_sel_fetch_enabled = false;
1650  	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1651  }
1652  
1653  /**
1654   * intel_psr_disable - Disable PSR
1655   * @intel_dp: Intel DP
1656   * @old_crtc_state: old CRTC state
1657   *
1658   * This function needs to be called before disabling the pipe.
1659   */
1660  void intel_psr_disable(struct intel_dp *intel_dp,
1661  		       const struct intel_crtc_state *old_crtc_state)
1662  {
1663  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1664  
1665  	if (!old_crtc_state->has_psr)
1666  		return;
1667  
1668  	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1669  		return;
1670  
1671  	mutex_lock(&intel_dp->psr.lock);
1672  
1673  	intel_psr_disable_locked(intel_dp);
1674  
1675  	mutex_unlock(&intel_dp->psr.lock);
1676  	cancel_work_sync(&intel_dp->psr.work);
1677  	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1678  }
1679  
1680  /**
1681   * intel_psr_pause - Pause PSR
1682   * @intel_dp: Intel DP
1683   *
1684   * This function needs to be called after enabling PSR.
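 *
 * A typical caller (illustrative, not a fixed API contract) brackets work
 * that must not race with PSR:
 *
 *   intel_psr_pause(intel_dp);
 *   ... touch hardware that PSR must not race with ...
 *   intel_psr_resume(intel_dp);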
1685   */
1686  void intel_psr_pause(struct intel_dp *intel_dp)
1687  {
1688  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1689  	struct intel_psr *psr = &intel_dp->psr;
1690  
1691  	if (!CAN_PSR(intel_dp))
1692  		return;
1693  
1694  	mutex_lock(&psr->lock);
1695  
1696  	if (!psr->enabled) {
1697  		mutex_unlock(&psr->lock);
1698  		return;
1699  	}
1700  
1701  	/* If we ever hit this, we will need to add refcount to pause/resume */
1702  	drm_WARN_ON(&dev_priv->drm, psr->paused);
1703  
1704  	intel_psr_exit(intel_dp);
1705  	intel_psr_wait_exit_locked(intel_dp);
1706  	psr->paused = true;
1707  
1708  	mutex_unlock(&psr->lock);
1709  
1710  	cancel_work_sync(&psr->work);
1711  	cancel_delayed_work_sync(&psr->dc3co_work);
1712  }
1713  
1714  /**
1715   * intel_psr_resume - Resume PSR
1716   * @intel_dp: Intel DP
1717   *
1718   * This function needs to be called after pausing PSR.
1719   */
1720  void intel_psr_resume(struct intel_dp *intel_dp)
1721  {
1722  	struct intel_psr *psr = &intel_dp->psr;
1723  
1724  	if (!CAN_PSR(intel_dp))
1725  		return;
1726  
1727  	mutex_lock(&psr->lock);
1728  
1729  	if (!psr->paused)
1730  		goto unlock;
1731  
1732  	psr->paused = false;
1733  	intel_psr_activate(intel_dp);
1734  
1735  unlock:
1736  	mutex_unlock(&psr->lock);
1737  }
1738  
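/*
 * The PSR2_MAN_TRK_CTL bit layout differs on ADL-P and display 14+ (there is
 * no separate enable bit on those platforms, hence the helper returning 0),
 * so these helpers hide the per-platform bit definitions.
 */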
1739  static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1740  {
1741  	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1742  		PSR2_MAN_TRK_CTL_ENABLE;
1743  }
1744  
1745  static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1746  {
1747  	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1748  	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1749  	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1750  }
1751  
1752  static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1753  {
1754  	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1755  	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1756  	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1757  }
1758  
1759  static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1760  {
1761  	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1762  	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1763  	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1764  }
1765  
1766  static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1767  {
1768  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1769  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1770  
1771  	if (intel_dp->psr.psr2_sel_fetch_enabled)
1772  		intel_de_write(dev_priv,
1773  			       PSR2_MAN_TRK_CTL(cpu_transcoder),
1774  			       man_trk_ctl_enable_bit_get(dev_priv) |
1775  			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
1776  			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1777  			       man_trk_ctl_continuos_full_frame(dev_priv));
1778  
1779  	/*
1780  	 * Display WA #0884: skl+
1781  	 * This documented WA for bxt can be safely applied
1782  	 * broadly so we can force HW tracking to exit PSR
1783  	 * instead of disabling and re-enabling.
1784  	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1785  	 * but it makes more sense to write to the currently active
1786  	 * pipe.
1787  	 *
1788  	 * This workaround does not exist for platforms with display 10 or
1789  	 * newer, but testing proved that it works up to display 13; newer
1790  	 * than that will need testing.
1791  	 */
1792  	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1793  }
1794  
1795  void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
1796  					    const struct intel_crtc_state *crtc_state)
1797  {
1798  	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1799  	enum pipe pipe = plane->pipe;
1800  
1801  	if (!crtc_state->enable_psr2_sel_fetch)
1802  		return;
1803  
1804  	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1805  }
1806  
1807  void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
1808  					    const struct intel_crtc_state *crtc_state,
1809  					    const struct intel_plane_state *plane_state)
1810  {
1811  	struct drm_i915_private *i915 = to_i915(plane->base.dev);
1812  	enum pipe pipe = plane->pipe;
1813  
1814  	if (!crtc_state->enable_psr2_sel_fetch)
1815  		return;
1816  
1817  	if (plane->id == PLANE_CURSOR)
1818  		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1819  				  plane_state->ctl);
1820  	else
1821  		intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1822  				  PLANE_SEL_FETCH_CTL_ENABLE);
1823  }
1824  
1825  void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
1826  					      const struct intel_crtc_state *crtc_state,
1827  					      const struct intel_plane_state *plane_state,
1828  					      int color_plane)
1829  {
1830  	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1831  	enum pipe pipe = plane->pipe;
1832  	const struct drm_rect *clip;
1833  	u32 val;
1834  	int x, y;
1835  
1836  	if (!crtc_state->enable_psr2_sel_fetch)
1837  		return;
1838  
1839  	if (plane->id == PLANE_CURSOR)
1840  		return;
1841  
1842  	clip = &plane_state->psr2_sel_fetch_area;
1843  
1844  	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1845  	val |= plane_state->uapi.dst.x1;
1846  	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1847  
1848  	x = plane_state->view.color_plane[color_plane].x;
1849  
1850  	/*
1851  	 * From Bspec: UV surface Start Y Position = half of Y plane Y
1852  	 * start position.
1853  	 */
1854  	if (!color_plane)
1855  		y = plane_state->view.color_plane[color_plane].y + clip->y1;
1856  	else
1857  		y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1858  
1859  	val = y << 16 | x;
1860  
1861  	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1862  			  val);
1863  
1864  	/* Sizes are 0 based */
1865  	val = (drm_rect_height(clip) - 1) << 16;
1866  	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1867  	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1868  }
1869  
1870  void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1871  {
1872  	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1873  	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1874  	struct intel_encoder *encoder;
1875  
1876  	if (!crtc_state->enable_psr2_sel_fetch)
1877  		return;
1878  
1879  	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1880  					     crtc_state->uapi.encoder_mask) {
1881  		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1882  
1883  		lockdep_assert_held(&intel_dp->psr.lock);
1884  		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1885  			return;
1886  		break;
1887  	}
1888  
1889  	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1890  		       crtc_state->psr2_man_track_ctl);
1891  }
1892  
1893  static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1894  				  struct drm_rect *clip, bool full_update)
1895  {
1896  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1897  	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1898  	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1899  
1900  	/* SF partial frame enable has to be set even on full update */
1901  	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1902  
1903  	if (full_update) {
1904  		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1905  		val |= man_trk_ctl_continuos_full_frame(dev_priv);
1906  		goto exit;
1907  	}
1908  
1909  	if (clip->y1 == -1)
1910  		goto exit;
1911  
1912  	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1913  		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1914  		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1915  	} else {
1916  		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1917  
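		/*
		 * Pre-ADLP hardware programs the SU region in blocks of 4
		 * lines; with the +1 an already 4-line-aligned clip of
		 * y1 = 8, y2 = 24, for example, is written as start address 3
		 * and end address 7.
		 */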
1918  		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1919  		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1920  	}
1921  exit:
1922  	crtc_state->psr2_man_track_ctl = val;
1923  }
1924  
1925  static void clip_area_update(struct drm_rect *overlap_damage_area,
1926  			     struct drm_rect *damage_area,
1927  			     struct drm_rect *pipe_src)
1928  {
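	/*
	 * Clamp the damaged area to the pipe and grow the overlapping damage
	 * area to the union of all damaged y ranges seen so far
	 * (y1 == -1 means no damage has been recorded yet).
	 */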
1929  	if (!drm_rect_intersect(damage_area, pipe_src))
1930  		return;
1931  
1932  	if (overlap_damage_area->y1 == -1) {
1933  		overlap_damage_area->y1 = damage_area->y1;
1934  		overlap_damage_area->y2 = damage_area->y2;
1935  		return;
1936  	}
1937  
1938  	if (damage_area->y1 < overlap_damage_area->y1)
1939  		overlap_damage_area->y1 = damage_area->y1;
1940  
1941  	if (damage_area->y2 > overlap_damage_area->y2)
1942  		overlap_damage_area->y2 = damage_area->y2;
1943  }
1944  
1945  static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1946  						struct drm_rect *pipe_clip)
1947  {
1948  	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1949  	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1950  	u16 y_alignment;
1951  
1952  	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
1953  	if (crtc_state->dsc.compression_enable &&
1954  	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
1955  		y_alignment = vdsc_cfg->slice_height;
1956  	else
1957  		y_alignment = crtc_state->su_y_granularity;
1958  
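	/*
	 * Round y1 down and y2 up to the alignment; e.g. with y_alignment = 4
	 * a clip of 10..21 becomes 8..24.
	 */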
1959  	pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1960  	if (pipe_clip->y2 % y_alignment)
1961  		pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
1962  }
1963  
1964  /*
1965   * TODO: Not clear how to handle planes with negative position;
1966   * also planes are not updated if they have a negative X
1967   * position, so for now do a full update in these cases.
1968   *
1969   * Plane scaling and rotation are not supported by selective fetch and both
1970   * properties can change without a modeset, so they need to be checked at
1971   * every atomic commit.
1972   */
1973  static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1974  {
1975  	if (plane_state->uapi.dst.y1 < 0 ||
1976  	    plane_state->uapi.dst.x1 < 0 ||
1977  	    plane_state->scaler_id >= 0 ||
1978  	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1979  		return false;
1980  
1981  	return true;
1982  }
1983  
1984  /*
1985   * Check for pipe properties that are not supported by selective fetch.
1986   *
1987   * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
1988   * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
1989   * enabled and going to the full update path.
1990   */
1991  static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1992  {
1993  	if (crtc_state->scaler_state.scaler_id >= 0)
1994  		return false;
1995  
1996  	return true;
1997  }
1998  
1999  int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2000  				struct intel_crtc *crtc)
2001  {
2002  	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2003  	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2004  	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
2005  	struct intel_plane_state *new_plane_state, *old_plane_state;
2006  	struct intel_plane *plane;
2007  	bool full_update = false;
2008  	int i, ret;
2009  
2010  	if (!crtc_state->enable_psr2_sel_fetch)
2011  		return 0;
2012  
2013  	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2014  		full_update = true;
2015  		goto skip_sel_fetch_set_loop;
2016  	}
2017  
2018  	/*
2019  	 * Calculate the minimal selective fetch area of each plane and the
2020  	 * overall pipe damaged area.
2021  	 * In the next loop the plane selective fetch area will actually be set
2022  	 * using the whole pipe damaged area.
2023  	 */
2024  	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2025  					     new_plane_state, i) {
2026  		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2027  						      .x2 = INT_MAX };
2028  
2029  		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2030  			continue;
2031  
2032  		if (!new_plane_state->uapi.visible &&
2033  		    !old_plane_state->uapi.visible)
2034  			continue;
2035  
2036  		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2037  			full_update = true;
2038  			break;
2039  		}
2040  
2041  		/*
2042  		 * If the plane's visibility changed or it moved, mark the whole
2043  		 * plane area as damaged as it needs a complete redraw in both
2044  		 * the old and new positions.
2045  		 */
2046  		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2047  		    !drm_rect_equals(&new_plane_state->uapi.dst,
2048  				     &old_plane_state->uapi.dst)) {
2049  			if (old_plane_state->uapi.visible) {
2050  				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2051  				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2052  				clip_area_update(&pipe_clip, &damaged_area,
2053  						 &crtc_state->pipe_src);
2054  			}
2055  
2056  			if (new_plane_state->uapi.visible) {
2057  				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2058  				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2059  				clip_area_update(&pipe_clip, &damaged_area,
2060  						 &crtc_state->pipe_src);
2061  			}
2062  			continue;
2063  		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2064  			/* If alpha changed mark the whole plane area as damaged */
2065  			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2066  			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2067  			clip_area_update(&pipe_clip, &damaged_area,
2068  					 &crtc_state->pipe_src);
2069  			continue;
2070  		}
2071  
2072  		src = drm_plane_state_src(&new_plane_state->uapi);
2073  		drm_rect_fp_to_int(&src, &src);
2074  
2075  		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2076  						     &new_plane_state->uapi, &damaged_area))
2077  			continue;
2078  
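		/*
		 * The merged damage is in plane source coordinates; shift it
		 * by the dst - src offset to get pipe coordinates.
		 */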
2079  		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2080  		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2081  		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2082  		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2083  
2084  		clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2085  	}
2086  
2087  	/*
2088  	 * TODO: For now we are just using full update in case
2089  	 * selective fetch area calculation fails. To optimize this we
2090  	 * should identify cases where this happens and fix the area
2091  	 * calculation for those.
2092  	 */
2093  	if (pipe_clip.y1 == -1) {
2094  		drm_info_once(&dev_priv->drm,
2095  			      "Selective fetch area calculation failed in pipe %c\n",
2096  			      pipe_name(crtc->pipe));
2097  		full_update = true;
2098  	}
2099  
2100  	if (full_update)
2101  		goto skip_sel_fetch_set_loop;
2102  
2103  	/* Wa_14014971492 */
2104  	if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
2105  	     IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2106  	    crtc_state->splitter.enable)
2107  		pipe_clip.y1 = 0;
2108  
2109  	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2110  	if (ret)
2111  		return ret;
2112  
2113  	intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2114  
2115  	/*
2116  	 * Now that we have the pipe damaged area, check if it intersects with
2117  	 * each plane; if it does, set the plane selective fetch area.
2118  	 */
2119  	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2120  					     new_plane_state, i) {
2121  		struct drm_rect *sel_fetch_area, inter;
2122  		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2123  
2124  		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2125  		    !new_plane_state->uapi.visible)
2126  			continue;
2127  
2128  		inter = pipe_clip;
2129  		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
2130  			continue;
2131  
2132  		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2133  			full_update = true;
2134  			break;
2135  		}
2136  
2137  		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2138  		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2139  		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2140  		crtc_state->update_planes |= BIT(plane->id);
2141  
2142  		/*
2143  		 * Sel_fetch_area is calculated for UV plane. Use
2144  		 * same area for Y plane as well.
2145  		 */
2146  		if (linked) {
2147  			struct intel_plane_state *linked_new_plane_state;
2148  			struct drm_rect *linked_sel_fetch_area;
2149  
2150  			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2151  			if (IS_ERR(linked_new_plane_state))
2152  				return PTR_ERR(linked_new_plane_state);
2153  
2154  			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2155  			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2156  			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2157  			crtc_state->update_planes |= BIT(linked->id);
2158  		}
2159  	}
2160  
2161  skip_sel_fetch_set_loop:
2162  	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2163  	return 0;
2164  }
2165  
2166  void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2167  				struct intel_crtc *crtc)
2168  {
2169  	struct drm_i915_private *i915 = to_i915(state->base.dev);
2170  	const struct intel_crtc_state *old_crtc_state =
2171  		intel_atomic_get_old_crtc_state(state, crtc);
2172  	const struct intel_crtc_state *new_crtc_state =
2173  		intel_atomic_get_new_crtc_state(state, crtc);
2174  	struct intel_encoder *encoder;
2175  
2176  	if (!HAS_PSR(i915))
2177  		return;
2178  
2179  	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2180  					     old_crtc_state->uapi.encoder_mask) {
2181  		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2182  		struct intel_psr *psr = &intel_dp->psr;
2183  		bool needs_to_disable = false;
2184  
2185  		mutex_lock(&psr->lock);
2186  
2187  		/*
2188  		 * Reasons to disable:
2189  		 * - PSR disabled in new state
2190  		 * - All planes will go inactive
2191  		 * - Changing between PSR versions
2192  		 * - Display WA #1136: skl, bxt
2193  		 */
2194  		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2195  		needs_to_disable |= !new_crtc_state->has_psr;
2196  		needs_to_disable |= !new_crtc_state->active_planes;
2197  		needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2198  		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2199  			new_crtc_state->wm_level_disabled;
2200  
2201  		if (psr->enabled && needs_to_disable)
2202  			intel_psr_disable_locked(intel_dp);
2203  		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2204  			/* Wa_14015648006 */
2205  			wm_optimization_wa(intel_dp, new_crtc_state);
2206  
2207  		mutex_unlock(&psr->lock);
2208  	}
2209  }
2210  
2211  static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
2212  					 const struct intel_crtc_state *crtc_state)
2213  {
2214  	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2215  	struct intel_encoder *encoder;
2216  
2217  	if (!crtc_state->has_psr)
2218  		return;
2219  
2220  	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2221  					     crtc_state->uapi.encoder_mask) {
2222  		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2223  		struct intel_psr *psr = &intel_dp->psr;
2224  		bool keep_disabled = false;
2225  
2226  		mutex_lock(&psr->lock);
2227  
2228  		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2229  
2230  		keep_disabled |= psr->sink_not_reliable;
2231  		keep_disabled |= !crtc_state->active_planes;
2232  
2233  		/* Display WA #1136: skl, bxt */
2234  		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2235  			crtc_state->wm_level_disabled;
2236  
2237  		if (!psr->enabled && !keep_disabled)
2238  			intel_psr_enable_locked(intel_dp, crtc_state);
2239  		else if (psr->enabled && !crtc_state->wm_level_disabled)
2240  			/* Wa_14015648006 */
2241  			wm_optimization_wa(intel_dp, crtc_state);
2242  
2243  		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2244  		if (crtc_state->crc_enabled && psr->enabled)
2245  			psr_force_hw_tracking_exit(intel_dp);
2246  
2247  		mutex_unlock(&psr->lock);
2248  	}
2249  }
2250  
2251  void intel_psr_post_plane_update(const struct intel_atomic_state *state)
2252  {
2253  	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2254  	struct intel_crtc_state *crtc_state;
2255  	struct intel_crtc *crtc;
2256  	int i;
2257  
2258  	if (!HAS_PSR(dev_priv))
2259  		return;
2260  
2261  	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
2262  		_intel_psr_post_plane_update(state, crtc_state);
2263  }
2264  
2265  static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2266  {
2267  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2268  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2269  
2270  	/*
2271  	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2272  	 * As all higher states have bit 4 of the PSR2 state set we can just wait for
2273  	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2274  	 */
2275  	return intel_de_wait_for_clear(dev_priv,
2276  				       EDP_PSR2_STATUS(cpu_transcoder),
2277  				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2278  }
2279  
2280  static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2281  {
2282  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2283  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2284  
2285  	/*
2286  	 * From bspec: Panel Self Refresh (BDW+)
2287  	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2288  	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2289  	 * defensive enough to cover everything.
2290  	 */
2291  	return intel_de_wait_for_clear(dev_priv,
2292  				       psr_status_reg(dev_priv, cpu_transcoder),
2293  				       EDP_PSR_STATUS_STATE_MASK, 50);
2294  }
2295  
2296  /**
2297   * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2298   * @new_crtc_state: new CRTC state
2299   *
2300   * This function is expected to be called from pipe_update_start() where it is
2301   * not expected to race with PSR enable or disable.
2302   */
2303  void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2304  {
2305  	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2306  	struct intel_encoder *encoder;
2307  
2308  	if (!new_crtc_state->has_psr)
2309  		return;
2310  
2311  	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2312  					     new_crtc_state->uapi.encoder_mask) {
2313  		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2314  		int ret;
2315  
2316  		lockdep_assert_held(&intel_dp->psr.lock);
2317  
2318  		if (!intel_dp->psr.enabled)
2319  			continue;
2320  
2321  		if (intel_dp->psr.psr2_enabled)
2322  			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2323  		else
2324  			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2325  
2326  		if (ret)
2327  			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2328  	}
2329  }
2330  
2331  static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2332  {
2333  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2334  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2335  	i915_reg_t reg;
2336  	u32 mask;
2337  	int err;
2338  
2339  	if (!intel_dp->psr.enabled)
2340  		return false;
2341  
2342  	if (intel_dp->psr.psr2_enabled) {
2343  		reg = EDP_PSR2_STATUS(cpu_transcoder);
2344  		mask = EDP_PSR2_STATUS_STATE_MASK;
2345  	} else {
2346  		reg = psr_status_reg(dev_priv, cpu_transcoder);
2347  		mask = EDP_PSR_STATUS_STATE_MASK;
2348  	}
2349  
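	/*
	 * Drop the PSR lock while polling so that frontbuffer
	 * invalidate/flush callers are not blocked for the duration of the
	 * wait (up to 50 ms).
	 */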
2350  	mutex_unlock(&intel_dp->psr.lock);
2351  
2352  	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2353  	if (err)
2354  		drm_err(&dev_priv->drm,
2355  			"Timed out waiting for PSR Idle for re-enable\n");
2356  
2357  	/* After the unlocked wait, verify that PSR is still wanted! */
2358  	mutex_lock(&intel_dp->psr.lock);
2359  	return err == 0 && intel_dp->psr.enabled;
2360  }
2361  
2362  static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2363  {
2364  	struct drm_connector_list_iter conn_iter;
2365  	struct drm_modeset_acquire_ctx ctx;
2366  	struct drm_atomic_state *state;
2367  	struct drm_connector *conn;
2368  	int err = 0;
2369  
2370  	state = drm_atomic_state_alloc(&dev_priv->drm);
2371  	if (!state)
2372  		return -ENOMEM;
2373  
2374  	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2375  
2376  	state->acquire_ctx = &ctx;
2377  	to_intel_atomic_state(state)->internal = true;
2378  
2379  retry:
2380  	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2381  	drm_for_each_connector_iter(conn, &conn_iter) {
2382  		struct drm_connector_state *conn_state;
2383  		struct drm_crtc_state *crtc_state;
2384  
2385  		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2386  			continue;
2387  
2388  		conn_state = drm_atomic_get_connector_state(state, conn);
2389  		if (IS_ERR(conn_state)) {
2390  			err = PTR_ERR(conn_state);
2391  			break;
2392  		}
2393  
2394  		if (!conn_state->crtc)
2395  			continue;
2396  
2397  		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2398  		if (IS_ERR(crtc_state)) {
2399  			err = PTR_ERR(crtc_state);
2400  			break;
2401  		}
2402  
2403  		/* Mark mode as changed to trigger a pipe->update() */
2404  		crtc_state->mode_changed = true;
2405  	}
2406  	drm_connector_list_iter_end(&conn_iter);
2407  
2408  	if (err == 0)
2409  		err = drm_atomic_commit(state);
2410  
2411  	if (err == -EDEADLK) {
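	/* Standard modeset-lock backoff: drop the contended locks and retry. */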
2412  		drm_atomic_state_clear(state);
2413  		err = drm_modeset_backoff(&ctx);
2414  		if (!err)
2415  			goto retry;
2416  	}
2417  
2418  	drm_modeset_drop_locks(&ctx);
2419  	drm_modeset_acquire_fini(&ctx);
2420  	drm_atomic_state_put(state);
2421  
2422  	return err;
2423  }
2424  
2425  int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2426  {
2427  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2428  	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2429  	u32 old_mode;
2430  	int ret;
2431  
2432  	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2433  	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2434  		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2435  		return -EINVAL;
2436  	}
2437  
2438  	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2439  	if (ret)
2440  		return ret;
2441  
2442  	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2443  	intel_dp->psr.debug = val;
2444  
2445  	/*
2446  	 * Do it right away if it's already enabled, otherwise it will be done
2447  	 * when enabling the source.
2448  	 */
2449  	if (intel_dp->psr.enabled)
2450  		psr_irq_control(intel_dp);
2451  
2452  	mutex_unlock(&intel_dp->psr.lock);
2453  
2454  	if (old_mode != mode)
2455  		ret = intel_psr_fastset_force(dev_priv);
2456  
2457  	return ret;
2458  }
2459  
2460  static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2461  {
2462  	struct intel_psr *psr = &intel_dp->psr;
2463  
2464  	intel_psr_disable_locked(intel_dp);
2465  	psr->sink_not_reliable = true;
2466  	/* let's make sure that the sink is awake */
2467  	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2468  }
2469  
2470  static void intel_psr_work(struct work_struct *work)
2471  {
2472  	struct intel_dp *intel_dp =
2473  		container_of(work, typeof(*intel_dp), psr.work);
2474  
2475  	mutex_lock(&intel_dp->psr.lock);
2476  
2477  	if (!intel_dp->psr.enabled)
2478  		goto unlock;
2479  
2480  	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2481  		intel_psr_handle_irq(intel_dp);
2482  
2483  	/*
2484  	 * We have to make sure PSR is ready for re-enable,
2485  	 * otherwise it stays disabled until the next full enable/disable cycle.
2486  	 * PSR might take some time to get fully disabled
2487  	 * and be ready for re-enable.
2488  	 */
2489  	if (!__psr_wait_for_idle_locked(intel_dp))
2490  		goto unlock;
2491  
2492  	/*
2493  	 * The delayed work can race with an invalidate hence we need to
2494  	 * recheck. Since psr_flush first clears this and then reschedules we
2495  	 * won't ever miss a flush when bailing out here.
2496  	 */
2497  	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2498  		goto unlock;
2499  
2500  	intel_psr_activate(intel_dp);
2501  unlock:
2502  	mutex_unlock(&intel_dp->psr.lock);
2503  }
2504  
2505  static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2506  {
2507  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2508  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2509  
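	/*
	 * With selective fetch enabled we don't exit PSR on invalidate;
	 * instead manual tracking is switched to continuous full frame (CFF)
	 * fetches until the corresponding flush.
	 */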
2510  	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2511  		u32 val;
2512  
2513  		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2514  			/* Send one update, otherwise lag is observed on screen */
2515  			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2516  			return;
2517  		}
2518  
2519  		val = man_trk_ctl_enable_bit_get(dev_priv) |
2520  		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
2521  		      man_trk_ctl_continuos_full_frame(dev_priv);
2522  		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2523  		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2524  		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2525  	} else {
2526  		intel_psr_exit(intel_dp);
2527  	}
2528  }
2529  
2530  /**
2531   * intel_psr_invalidate - Invalidate PSR
2532   * @dev_priv: i915 device
2533   * @frontbuffer_bits: frontbuffer plane tracking bits
2534   * @origin: which operation caused the invalidate
2535   *
2536   * Since the hardware frontbuffer tracking has gaps we need to integrate
2537   * with the software frontbuffer tracking. This function gets called every
2538   * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2539   * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2540   *
2541   * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2542   */
2543  void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2544  			  unsigned frontbuffer_bits, enum fb_op_origin origin)
2545  {
2546  	struct intel_encoder *encoder;
2547  
2548  	if (origin == ORIGIN_FLIP)
2549  		return;
2550  
2551  	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2552  		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2553  		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2554  
2555  		mutex_lock(&intel_dp->psr.lock);
2556  		if (!intel_dp->psr.enabled) {
2557  			mutex_unlock(&intel_dp->psr.lock);
2558  			continue;
2559  		}
2560  
2561  		pipe_frontbuffer_bits &=
2562  			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2563  		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2564  
2565  		if (pipe_frontbuffer_bits)
2566  			_psr_invalidate_handle(intel_dp);
2567  
2568  		mutex_unlock(&intel_dp->psr.lock);
2569  	}
2570  }
2571  /*
2572   * Once we completely rely on PSR2 S/W tracking in the future,
2573   * intel_psr_flush() will also invalidate and flush the PSR for the
2574   * ORIGIN_FLIP event; therefore tgl_dc3co_flush_locked() will need to be
2575   * changed accordingly.
2576   */
2577  static void
2578  tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2579  		       enum fb_op_origin origin)
2580  {
2581  	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2582  
2583  	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2584  	    !intel_dp->psr.active)
2585  		return;
2586  
2587  	/*
2588  	 * At every frontbuffer flush flip event modify the delay of the delayed
2589  	 * work; when the delayed work finally runs it means the display has been idle.
2590  	 */
2591  	if (!(frontbuffer_bits &
2592  	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2593  		return;
2594  
2595  	tgl_psr2_enable_dc3co(intel_dp);
2596  	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2597  			 intel_dp->psr.dc3co_exit_delay);
2598  }
2599  
2600  static void _psr_flush_handle(struct intel_dp *intel_dp)
2601  {
2602  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2603  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2604  
2605  	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2606  		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2607  			/* can we turn CFF off? */
2608  			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2609  				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2610  					man_trk_ctl_partial_frame_bit_get(dev_priv) |
2611  					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2612  					man_trk_ctl_continuos_full_frame(dev_priv);
2613  
2614  				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
2615  				 * updates. Still keep the CFF bit enabled as we don't have a proper
2616  				 * SU configuration in case an update is sent for any reason after
2617  				 * the SFF bit gets cleared by the HW on the next vblank.
2618  				 * sff bit gets cleared by the HW on next vblank.
2619  				 */
2620  				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2621  					       val);
2622  				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2623  				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2624  			}
2625  		} else {
2626  			/*
2627  			 * continuous full frame is disabled, only a single full
2628  			 * frame is required
2629  			 */
2630  			psr_force_hw_tracking_exit(intel_dp);
2631  		}
2632  	} else {
2633  		psr_force_hw_tracking_exit(intel_dp);
2634  
2635  		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2636  			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2637  	}
2638  }
2639  
2640  /**
2641   * intel_psr_flush - Flush PSR
2642   * @dev_priv: i915 device
2643   * @frontbuffer_bits: frontbuffer plane tracking bits
2644   * @origin: which operation caused the flush
2645   *
2646   * Since the hardware frontbuffer tracking has gaps we need to integrate
2647   * with the software frontbuffer tracking. This function gets called every
2648   * time frontbuffer rendering has completed and flushed out to memory. PSR
2649   * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2650   *
2651   * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2652   */
2653  void intel_psr_flush(struct drm_i915_private *dev_priv,
2654  		     unsigned frontbuffer_bits, enum fb_op_origin origin)
2655  {
2656  	struct intel_encoder *encoder;
2657  
2658  	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2659  		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2660  		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2661  
2662  		mutex_lock(&intel_dp->psr.lock);
2663  		if (!intel_dp->psr.enabled) {
2664  			mutex_unlock(&intel_dp->psr.lock);
2665  			continue;
2666  		}
2667  
2668  		pipe_frontbuffer_bits &=
2669  			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2670  		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2671  
2672  		/*
2673  		 * If the PSR is paused by an explicit intel_psr_pause() call,
2674  		 * we have to ensure that the PSR is not activated until
2675  		 * intel_psr_resume() is called.
2676  		 */
2677  		if (intel_dp->psr.paused)
2678  			goto unlock;
2679  
2680  		if (origin == ORIGIN_FLIP ||
2681  		    (origin == ORIGIN_CURSOR_UPDATE &&
2682  		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
2683  			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2684  			goto unlock;
2685  		}
2686  
2687  		if (pipe_frontbuffer_bits == 0)
2688  			goto unlock;
2689  
2690  		/* By definition flush = invalidate + flush */
2691  		_psr_flush_handle(intel_dp);
2692  unlock:
2693  		mutex_unlock(&intel_dp->psr.lock);
2694  	}
2695  }
2696  
2697  /**
2698   * intel_psr_init - Init basic PSR work and mutex.
2699   * @intel_dp: Intel DP
2700   *
2701   * This function is called after initializing the connector
2702   * (connector initialization handles the connector capabilities),
2703   * and it initializes basic PSR stuff for each DP encoder.
2704   */
2705  void intel_psr_init(struct intel_dp *intel_dp)
2706  {
2707  	struct intel_connector *connector = intel_dp->attached_connector;
2708  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2709  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2710  
2711  	if (!HAS_PSR(dev_priv))
2712  		return;
2713  
2714  	/*
2715  	 * HSW spec explicitly says PSR is tied to port A.
2716  	 * BDW+ platforms have an instance of PSR registers per transcoder, but
2717  	 * BDW, GEN9 and GEN11 are not validated by the HW team on any
2718  	 * transcoder other than the eDP one.
2719  	 * For now only one instance of PSR is supported for BDW, GEN9 and GEN11,
2720  	 * so let's keep it hardcoded to PORT_A for those platforms.
2721  	 * GEN12 supports an instance of PSR registers per transcoder.
2722  	 */
2723  	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2724  		drm_dbg_kms(&dev_priv->drm,
2725  			    "PSR condition failed: Port not supported\n");
2726  		return;
2727  	}
2728  
2729  	intel_dp->psr.source_support = true;
2730  
2731  	/* Set link_standby x link_off defaults */
2732  	if (DISPLAY_VER(dev_priv) < 12)
2733  		/* For new platforms up to TGL let's respect VBT back again */
2734  		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2735  
2736  	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2737  	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2738  	mutex_init(&intel_dp->psr.lock);
2739  }
2740  
2741  static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2742  					   u8 *status, u8 *error_status)
2743  {
2744  	struct drm_dp_aux *aux = &intel_dp->aux;
2745  	int ret;
2746  
2747  	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2748  	if (ret != 1)
2749  		return ret;
2750  
2751  	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2752  	if (ret != 1)
2753  		return ret;
2754  
2755  	*status = *status & DP_PSR_SINK_STATE_MASK;
2756  
2757  	return 0;
2758  }
2759  
2760  static void psr_alpm_check(struct intel_dp *intel_dp)
2761  {
2762  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2763  	struct drm_dp_aux *aux = &intel_dp->aux;
2764  	struct intel_psr *psr = &intel_dp->psr;
2765  	u8 val;
2766  	int r;
2767  
2768  	if (!psr->psr2_enabled)
2769  		return;
2770  
2771  	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2772  	if (r != 1) {
2773  		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2774  		return;
2775  	}
2776  
2777  	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2778  		intel_psr_disable_locked(intel_dp);
2779  		psr->sink_not_reliable = true;
2780  		drm_dbg_kms(&dev_priv->drm,
2781  			    "ALPM lock timeout error, disabling PSR\n");
2782  
2783  		/* Clearing error */
2784  		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2785  	}
2786  }
2787  
2788  static void psr_capability_changed_check(struct intel_dp *intel_dp)
2789  {
2790  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2791  	struct intel_psr *psr = &intel_dp->psr;
2792  	u8 val;
2793  	int r;
2794  
2795  	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2796  	if (r != 1) {
2797  		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2798  		return;
2799  	}
2800  
2801  	if (val & DP_PSR_CAPS_CHANGE) {
2802  		intel_psr_disable_locked(intel_dp);
2803  		psr->sink_not_reliable = true;
2804  		drm_dbg_kms(&dev_priv->drm,
2805  			    "Sink PSR capability changed, disabling PSR\n");
2806  
2807  		/* Clearing it */
2808  		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2809  	}
2810  }
2811  
2812  void intel_psr_short_pulse(struct intel_dp *intel_dp)
2813  {
2814  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2815  	struct intel_psr *psr = &intel_dp->psr;
2816  	u8 status, error_status;
2817  	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2818  			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2819  			  DP_PSR_LINK_CRC_ERROR;
2820  
2821  	if (!CAN_PSR(intel_dp))
2822  		return;
2823  
2824  	mutex_lock(&psr->lock);
2825  
2826  	if (!psr->enabled)
2827  		goto exit;
2828  
2829  	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2830  		drm_err(&dev_priv->drm,
2831  			"Error reading PSR status or error status\n");
2832  		goto exit;
2833  	}
2834  
2835  	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2836  		intel_psr_disable_locked(intel_dp);
2837  		psr->sink_not_reliable = true;
2838  	}
2839  
2840  	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2841  		drm_dbg_kms(&dev_priv->drm,
2842  			    "PSR sink internal error, disabling PSR\n");
2843  	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2844  		drm_dbg_kms(&dev_priv->drm,
2845  			    "PSR RFB storage error, disabling PSR\n");
2846  	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2847  		drm_dbg_kms(&dev_priv->drm,
2848  			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
2849  	if (error_status & DP_PSR_LINK_CRC_ERROR)
2850  		drm_dbg_kms(&dev_priv->drm,
2851  			    "PSR Link CRC error, disabling PSR\n");
2852  
2853  	if (error_status & ~errors)
2854  		drm_err(&dev_priv->drm,
2855  			"PSR_ERROR_STATUS unhandled errors %x\n",
2856  			error_status & ~errors);
2857  	/* clear status register */
2858  	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2859  
2860  	psr_alpm_check(intel_dp);
2861  	psr_capability_changed_check(intel_dp);
2862  
2863  exit:
2864  	mutex_unlock(&psr->lock);
2865  }
2866  
2867  bool intel_psr_enabled(struct intel_dp *intel_dp)
2868  {
2869  	bool ret;
2870  
2871  	if (!CAN_PSR(intel_dp))
2872  		return false;
2873  
2874  	mutex_lock(&intel_dp->psr.lock);
2875  	ret = intel_dp->psr.enabled;
2876  	mutex_unlock(&intel_dp->psr.lock);
2877  
2878  	return ret;
2879  }
2880  
2881  /**
2882   * intel_psr_lock - grab PSR lock
2883   * @crtc_state: the crtc state
2884   *
2885   * This is initially meant to be used around the CRTC update, when
2886   * vblank-sensitive registers are updated and we need to grab the lock
2887   * before that to avoid vblank evasion.
2888   */
2889  void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2890  {
2891  	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2892  	struct intel_encoder *encoder;
2893  
2894  	if (!crtc_state->has_psr)
2895  		return;
2896  
2897  	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2898  					     crtc_state->uapi.encoder_mask) {
2899  		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2900  
2901  		mutex_lock(&intel_dp->psr.lock);
2902  		break;
2903  	}
2904  }
2905  
2906  /**
2907   * intel_psr_unlock - release PSR lock
2908   * @crtc_state: the crtc state
2909   *
2910   * Release the PSR lock that was held during pipe update.
2911   */
2912  void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2913  {
2914  	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2915  	struct intel_encoder *encoder;
2916  
2917  	if (!crtc_state->has_psr)
2918  		return;
2919  
2920  	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2921  					     crtc_state->uapi.encoder_mask) {
2922  		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2923  
2924  		mutex_unlock(&intel_dp->psr.lock);
2925  		break;
2926  	}
2927  }
2928  
2929  static void
2930  psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
2931  {
2932  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2933  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2934  	const char *status = "unknown";
2935  	u32 val, status_val;
2936  
2937  	if (intel_dp->psr.psr2_enabled) {
2938  		static const char * const live_status[] = {
2939  			"IDLE",
2940  			"CAPTURE",
2941  			"CAPTURE_FS",
2942  			"SLEEP",
2943  			"BUFON_FW",
2944  			"ML_UP",
2945  			"SU_STANDBY",
2946  			"FAST_SLEEP",
2947  			"DEEP_SLEEP",
2948  			"BUF_ON",
2949  			"TG_ON"
2950  		};
2951  		val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
2952  		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
2953  		if (status_val < ARRAY_SIZE(live_status))
2954  			status = live_status[status_val];
2955  	} else {
2956  		static const char * const live_status[] = {
2957  			"IDLE",
2958  			"SRDONACK",
2959  			"SRDENT",
2960  			"BUFOFF",
2961  			"BUFON",
2962  			"AUXACK",
2963  			"SRDOFFACK",
2964  			"SRDENT_ON",
2965  		};
2966  		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
2967  		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
2968  		if (status_val < ARRAY_SIZE(live_status))
2969  			status = live_status[status_val];
2970  	}
2971  
2972  	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2973  }
2974  
2975  static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
2976  {
2977  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2978  	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2979  	struct intel_psr *psr = &intel_dp->psr;
2980  	intel_wakeref_t wakeref;
2981  	const char *status;
2982  	bool enabled;
2983  	u32 val;
2984  
2985  	seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
2986  	if (psr->sink_support)
2987  		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
2988  	seq_puts(m, "\n");
2989  
2990  	if (!psr->sink_support)
2991  		return 0;
2992  
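	/* Keep the device awake while the PSR registers are read below. */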
2993  	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2994  	mutex_lock(&psr->lock);
2995  
2996  	if (psr->enabled)
2997  		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2998  	else
2999  		status = "disabled";
3000  	seq_printf(m, "PSR mode: %s\n", status);
3001  
3002  	if (!psr->enabled) {
3003  		seq_printf(m, "PSR sink not reliable: %s\n",
3004  			   str_yes_no(psr->sink_not_reliable));
3005  
3006  		goto unlock;
3007  	}
3008  
3009  	if (psr->psr2_enabled) {
3010  		val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3011  		enabled = val & EDP_PSR2_ENABLE;
3012  	} else {
3013  		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3014  		enabled = val & EDP_PSR_ENABLE;
3015  	}
3016  	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
3017  		   str_enabled_disabled(enabled), val);
3018  	psr_source_status(intel_dp, m);
3019  	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3020  		   psr->busy_frontbuffer_bits);
3021  
3022  	/*
3023  	 * SKL+ Perf counter is reset to 0 every time DC state is entered
3024  	 */
3025  	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3026  	seq_printf(m, "Performance counter: %u\n",
3027  		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3028  
3029  	if (psr->debug & I915_PSR_DEBUG_IRQ) {
3030  		seq_printf(m, "Last attempted entry at: %lld\n",
3031  			   psr->last_entry_attempt);
3032  		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3033  	}
3034  
3035  	if (psr->psr2_enabled) {
3036  		u32 su_frames_val[3];
3037  		int frame;
3038  
3039  		/*
3040  		 * Reading all 3 registers beforehand to minimize crossing a
3041  		 * frame boundary between register reads
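		 *
		 * Each PSR2_SU_STATUS register packs the SU block counts for up
		 * to 3 consecutive frames, hence the frame / 3 indexing and the
		 * per-frame mask/shift below.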
3042  		 */
3043  		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3044  			val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3045  			su_frames_val[frame / 3] = val;
3046  		}
3047  
3048  		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3049  
3050  		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3051  			u32 su_blocks;
3052  
3053  			su_blocks = su_frames_val[frame / 3] &
3054  				    PSR2_SU_STATUS_MASK(frame);
3055  			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3056  			seq_printf(m, "%d\t%d\n", frame, su_blocks);
3057  		}
3058  
3059  		seq_printf(m, "PSR2 selective fetch: %s\n",
3060  			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3061  	}
3062  
3063  unlock:
3064  	mutex_unlock(&psr->lock);
3065  	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3066  
3067  	return 0;
3068  }
3069  
3070  static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3071  {
3072  	struct drm_i915_private *dev_priv = m->private;
3073  	struct intel_dp *intel_dp = NULL;
3074  	struct intel_encoder *encoder;
3075  
3076  	if (!HAS_PSR(dev_priv))
3077  		return -ENODEV;
3078  
3079  	/* Find the first eDP encoder that supports PSR */
3080  	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3081  		intel_dp = enc_to_intel_dp(encoder);
3082  		break;
3083  	}
3084  
3085  	if (!intel_dp)
3086  		return -ENODEV;
3087  
3088  	return intel_psr_status(m, intel_dp);
3089  }
3090  DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3091  
3092  static int
3093  i915_edp_psr_debug_set(void *data, u64 val)
3094  {
3095  	struct drm_i915_private *dev_priv = data;
3096  	struct intel_encoder *encoder;
3097  	intel_wakeref_t wakeref;
3098  	int ret = -ENODEV;
3099  
3100  	if (!HAS_PSR(dev_priv))
3101  		return ret;
3102  
3103  	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3104  		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3105  
3106  		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3107  
3108  		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3109  
3110  		// TODO: split this into per-transcoder PSR debug state
3111  		ret = intel_psr_debug_set(intel_dp, val);
3112  
3113  		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3114  	}
3115  
3116  	return ret;
3117  }
3118  
3119  static int
3120  i915_edp_psr_debug_get(void *data, u64 *val)
3121  {
3122  	struct drm_i915_private *dev_priv = data;
3123  	struct intel_encoder *encoder;
3124  
3125  	if (!HAS_PSR(dev_priv))
3126  		return -ENODEV;
3127  
3128  	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3129  		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3130  
3131  		// TODO: split this into per-transcoder PSR debug state
3132  		*val = READ_ONCE(intel_dp->psr.debug);
3133  		return 0;
3134  	}
3135  
3136  	return -ENODEV;
3137  }
3138  
3139  DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3140  			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3141  			"%llu\n");
3142  
3143  void intel_psr_debugfs_register(struct drm_i915_private *i915)
3144  {
3145  	struct drm_minor *minor = i915->drm.primary;
3146  
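	/*
	 * These typically show up as <debugfs>/dri/<minor>/i915_edp_psr_debug
	 * and i915_edp_psr_status, depending on where debugfs is mounted.
	 */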
3147  	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3148  			    i915, &i915_edp_psr_debug_fops);
3149  
3150  	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3151  			    i915, &i915_edp_psr_status_fops);
3152  }
3153  
3154  static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3155  {
3156  	struct intel_connector *connector = m->private;
3157  	struct intel_dp *intel_dp = intel_attached_dp(connector);
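	/* Names indexed by the DP_PSR_SINK_STATE_MASK field of DP_PSR_STATUS */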
3158  	static const char * const sink_status[] = {
3159  		"inactive",
3160  		"transition to active, capture and display",
3161  		"active, display from RFB",
3162  		"active, capture and display on sink device timings",
3163  		"transition to inactive, capture and display, timing re-sync",
3164  		"reserved",
3165  		"reserved",
3166  		"sink internal error",
3167  	};
3168  	const char *str;
3169  	int ret;
3170  	u8 val;
3171  
3172  	if (!CAN_PSR(intel_dp)) {
3173  		seq_puts(m, "PSR Unsupported\n");
3174  		return -ENODEV;
3175  	}
3176  
3177  	if (connector->base.status != connector_status_connected)
3178  		return -ENODEV;
3179  
3180  	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
3181  	if (ret != 1)
3182  		return ret < 0 ? ret : -EIO;
3183  
3184  	val &= DP_PSR_SINK_STATE_MASK;
3185  	if (val < ARRAY_SIZE(sink_status))
3186  		str = sink_status[val];
3187  	else
3188  		str = "unknown";
3189  
3190  	seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
3191  
3192  	return 0;
3193  }
3194  DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3195  
3196  static int i915_psr_status_show(struct seq_file *m, void *data)
3197  {
3198  	struct intel_connector *connector = m->private;
3199  	struct intel_dp *intel_dp = intel_attached_dp(connector);
3200  
3201  	return intel_psr_status(m, intel_dp);
3202  }
3203  DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3204  
3205  void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3206  {
3207  	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3208  	struct dentry *root = connector->base.debugfs_entry;
3209  
3210  	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
3211  		return;
3212  
3213  	debugfs_create_file("i915_psr_sink_status", 0444, root,
3214  			    connector, &i915_psr_sink_status_fops);
3215  
3216  	if (HAS_PSR(i915))
3217  		debugfs_create_file("i915_psr_status", 0444, root,
3218  				    connector, &i915_psr_status_fops);
3219  }
3220