/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_DISPLAY_POWER_H__
#define __INTEL_DISPLAY_POWER_H__

#include "intel_display.h"
#include "intel_runtime_pm.h"
#include "i915_reg.h"

struct drm_i915_private;
struct intel_encoder;

enum intel_display_power_domain {
	POWER_DOMAIN_DISPLAY_CORE,
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_D,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_PIPE_D_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_D,
	POWER_DOMAIN_TRANSCODER_EDP,
	/* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */
	POWER_DOMAIN_TRANSCODER_VDSC_PW2,
	POWER_DOMAIN_TRANSCODER_DSI_A,
	POWER_DOMAIN_TRANSCODER_DSI_C,
	POWER_DOMAIN_PORT_DDI_A_LANES,
	POWER_DOMAIN_PORT_DDI_B_LANES,
	POWER_DOMAIN_PORT_DDI_C_LANES,
	POWER_DOMAIN_PORT_DDI_D_LANES,
	POWER_DOMAIN_PORT_DDI_E_LANES,
	POWER_DOMAIN_PORT_DDI_F_LANES,
	POWER_DOMAIN_PORT_DDI_G_LANES,
	POWER_DOMAIN_PORT_DDI_H_LANES,
	POWER_DOMAIN_PORT_DDI_I_LANES,

	POWER_DOMAIN_PORT_DDI_LANES_TC1 = POWER_DOMAIN_PORT_DDI_D_LANES, /* tgl+ */
	POWER_DOMAIN_PORT_DDI_LANES_TC2,
	POWER_DOMAIN_PORT_DDI_LANES_TC3,
	POWER_DOMAIN_PORT_DDI_LANES_TC4,
	POWER_DOMAIN_PORT_DDI_LANES_TC5,
	POWER_DOMAIN_PORT_DDI_LANES_TC6,

	POWER_DOMAIN_PORT_DDI_LANES_D_XELPD = POWER_DOMAIN_PORT_DDI_LANES_TC5, /* XELPD */
	POWER_DOMAIN_PORT_DDI_LANES_E_XELPD,

	POWER_DOMAIN_PORT_DDI_A_IO,
	POWER_DOMAIN_PORT_DDI_B_IO,
	POWER_DOMAIN_PORT_DDI_C_IO,
	POWER_DOMAIN_PORT_DDI_D_IO,
	POWER_DOMAIN_PORT_DDI_E_IO,
	POWER_DOMAIN_PORT_DDI_F_IO,
	POWER_DOMAIN_PORT_DDI_G_IO,
	POWER_DOMAIN_PORT_DDI_H_IO,
	POWER_DOMAIN_PORT_DDI_I_IO,

	POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_D_IO, /* tgl+ */
	POWER_DOMAIN_PORT_DDI_IO_TC2,
	POWER_DOMAIN_PORT_DDI_IO_TC3,
	POWER_DOMAIN_PORT_DDI_IO_TC4,
	POWER_DOMAIN_PORT_DDI_IO_TC5,
	POWER_DOMAIN_PORT_DDI_IO_TC6,

	POWER_DOMAIN_PORT_DDI_IO_D_XELPD = POWER_DOMAIN_PORT_DDI_IO_TC5, /* XELPD */
	POWER_DOMAIN_PORT_DDI_IO_E_XELPD,

	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO_MMIO,
	POWER_DOMAIN_AUDIO_PLAYBACK,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_AUX_E,
	POWER_DOMAIN_AUX_F,
	POWER_DOMAIN_AUX_G,
	POWER_DOMAIN_AUX_H,
	POWER_DOMAIN_AUX_I,

	POWER_DOMAIN_AUX_USBC1 = POWER_DOMAIN_AUX_D, /* tgl+ */
	POWER_DOMAIN_AUX_USBC2,
	POWER_DOMAIN_AUX_USBC3,
	POWER_DOMAIN_AUX_USBC4,
	POWER_DOMAIN_AUX_USBC5,
	POWER_DOMAIN_AUX_USBC6,

	POWER_DOMAIN_AUX_D_XELPD = POWER_DOMAIN_AUX_USBC5, /* XELPD */
	POWER_DOMAIN_AUX_E_XELPD,

	POWER_DOMAIN_AUX_IO_A,
	POWER_DOMAIN_AUX_C_TBT,
	POWER_DOMAIN_AUX_D_TBT,
	POWER_DOMAIN_AUX_E_TBT,
	POWER_DOMAIN_AUX_F_TBT,
	POWER_DOMAIN_AUX_G_TBT,
	POWER_DOMAIN_AUX_H_TBT,
	POWER_DOMAIN_AUX_I_TBT,

	POWER_DOMAIN_AUX_TBT1 = POWER_DOMAIN_AUX_D_TBT, /* tgl+ */
	POWER_DOMAIN_AUX_TBT2,
	POWER_DOMAIN_AUX_TBT3,
	POWER_DOMAIN_AUX_TBT4,
	POWER_DOMAIN_AUX_TBT5,
	POWER_DOMAIN_AUX_TBT6,

	POWER_DOMAIN_GMBUS,
	POWER_DOMAIN_MODESET,
	POWER_DOMAIN_GT_IRQ,
	POWER_DOMAIN_DPLL_DC_OFF,
	POWER_DOMAIN_TC_COLD_OFF,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};
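
/*
 * Note that the TC/XELPD domains above alias earlier per-port values rather
 * than extending the enum. An illustrative compile-time check of that
 * aliasing (a sketch only, not part of this header; assumes
 * <linux/build_bug.h> is available):
 *
 *	static_assert(POWER_DOMAIN_PORT_DDI_LANES_TC1 ==
 *		      POWER_DOMAIN_PORT_DDI_D_LANES);
 *	static_assert(POWER_DOMAIN_AUX_USBC1 == POWER_DOMAIN_AUX_D);
 */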

/*
 * i915_power_well_id:
 *
 * IDs used to look up power wells. Power wells accessed directly, bypassing
 * the power domains framework, must be assigned a unique ID. All other power
 * wells must be assigned DISP_PW_ID_NONE.
 */
enum i915_power_well_id {
	DISP_PW_ID_NONE,

	VLV_DISP_PW_DISP2D,
	BXT_DISP_PW_DPIO_CMN_A,
	VLV_DISP_PW_DPIO_CMN_BC,
	GLK_DISP_PW_DPIO_CMN_C,
	CHV_DISP_PW_DPIO_CMN_D,
	HSW_DISP_PW_GLOBAL,
	SKL_DISP_PW_MISC_IO,
	SKL_DISP_PW_1,
	SKL_DISP_PW_2,
	ICL_DISP_PW_3,
	SKL_DISP_DC_OFF,
	TGL_DISP_PW_TC_COLD_OFF,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)
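
/*
 * Illustrative use of the helpers above (a sketch, not part of this header):
 *
 *	enum intel_display_power_domain domain;
 *
 *	domain = POWER_DOMAIN_PIPE(PIPE_B);		  yields POWER_DOMAIN_PIPE_B
 *	domain = POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP);  yields POWER_DOMAIN_TRANSCODER_EDP
 */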

struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};
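
/*
 * Minimal sketch of an ops implementation for an always-on well (illustrative
 * only; the names below are hypothetical and not part of i915):
 *
 *	static void example_power_well_noop(struct drm_i915_private *dev_priv,
 *					    struct i915_power_well *power_well)
 *	{
 *	}
 *
 *	static bool example_power_well_enabled(struct drm_i915_private *dev_priv,
 *					       struct i915_power_well *power_well)
 *	{
 *		return true;
 *	}
 *
 *	static const struct i915_power_well_ops example_always_on_ops = {
 *		.sync_hw = example_power_well_noop,
 *		.enable = example_power_well_noop,
 *		.disable = example_power_well_noop,
 *		.is_enabled = example_power_well_enabled,
 *	};
 */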

struct i915_power_well_regs {
	i915_reg_t bios;
	i915_reg_t driver;
	i915_reg_t kvmr;
	i915_reg_t debug;
};

/* Power well descriptor */
struct i915_power_well_desc {
	const char *name;
	bool always_on;
	u64 domains;
	/* unique identifier for this power well */
	enum i915_power_well_id id;
	/*
	 * Arbitrary data associated with this power well. Platform and power
	 * well specific.
	 */
	union {
		struct {
			/*
			 * request/status flag index in the PUNIT power well
			 * control/status registers.
			 */
			u8 idx;
		} vlv;
		struct {
			enum dpio_phy phy;
		} bxt;
		struct {
			const struct i915_power_well_regs *regs;
			/*
			 * request/status flag index in the power well
			 * control/status registers.
			 */
			u8 idx;
			/* Mask of pipes whose IRQ logic is backed by the pw */
			u8 irq_pipe_mask;
			/*
			 * Instead of waiting for the status bit to ack enables,
			 * just wait a specific amount of time and then consider
			 * the well enabled.
			 */
			u16 fixed_enable_delay;
			/* The pw is backing the VGA functionality */
			bool has_vga:1;
			bool has_fuses:1;
			/*
			 * The pw is for an ICL+ TypeC PHY port in
			 * Thunderbolt mode.
			 */
			bool is_tc_tbt:1;
		} hsw;
	};
	const struct i915_power_well_ops *ops;
};
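
/*
 * Sketch of a descriptor table entry using the hsw variant (illustrative
 * only; the well name, ops, regs pointer and index below are made-up example
 * values, not real platform data):
 *
 *	static const struct i915_power_well_desc example_power_wells[] = {
 *		{
 *			.name = "example PW",
 *			.domains = BIT_ULL(POWER_DOMAIN_PIPE_A) |
 *				   BIT_ULL(POWER_DOMAIN_INIT),
 *			.ops = &example_always_on_ops,
 *			.id = DISP_PW_ID_NONE,
 *			{
 *				.hsw.regs = &example_power_well_regs,
 *				.hsw.idx = 0,
 *				.hsw.has_fuses = true,
 *			},
 *		},
 *	};
 */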

struct i915_power_well {
	const struct i915_power_well_desc *desc;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool initializing;
	bool display_core_suspended;
	int power_well_count;

	intel_wakeref_t init_wakeref;
	intel_wakeref_t disable_wakeref;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];

	struct delayed_work async_put_work;
	intel_wakeref_t async_put_wakeref;
	u64 async_put_domains[2];

	struct i915_power_well *power_wells;
};

struct intel_display_power_domain_set {
	u64 mask;
#ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM
	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM];
#endif
};

#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		for_each_if(BIT_ULL(domain) & (mask))

#define for_each_power_well(__dev_priv, __power_well)				\
	for ((__power_well) = (__dev_priv)->power_domains.power_wells;	\
	     (__power_well) - (__dev_priv)->power_domains.power_wells <	\
		(__dev_priv)->power_domains.power_well_count;		\
	     (__power_well)++)

#define for_each_power_well_reverse(__dev_priv, __power_well)			\
	for ((__power_well) = (__dev_priv)->power_domains.power_wells +		\
			      (__dev_priv)->power_domains.power_well_count - 1;	\
	     (__power_well) - (__dev_priv)->power_domains.power_wells >= 0;	\
	     (__power_well)--)

#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask)	\
	for_each_power_well(__dev_priv, __power_well)				\
		for_each_if((__power_well)->desc->domains & (__domain_mask))

#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
	for_each_power_well_reverse(__dev_priv, __power_well)			\
		for_each_if((__power_well)->desc->domains & (__domain_mask))
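
/*
 * Illustrative iteration over the wells backing a given domain mask (a sketch
 * only, not part of this header):
 *
 *	struct i915_power_well *power_well;
 *
 *	for_each_power_domain_well(dev_priv, power_well,
 *				   BIT_ULL(POWER_DOMAIN_PIPE_A))
 *		drm_dbg_kms(&dev_priv->drm, "%s backs pipe A\n",
 *			    power_well->desc->name);
 */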

int intel_power_domains_init(struct drm_i915_private *dev_priv);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv);
void intel_power_domains_enable(struct drm_i915_private *dev_priv);
void intel_power_domains_disable(struct drm_i915_private *dev_priv);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
				 enum i915_drm_suspend_mode);
void intel_power_domains_resume(struct drm_i915_private *dev_priv);

void intel_display_power_suspend_late(struct drm_i915_private *i915);
void intel_display_power_resume_early(struct drm_i915_private *i915);
void intel_display_power_suspend(struct drm_i915_private *i915);
void intel_display_power_resume(struct drm_i915_private *i915);
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);

bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain);
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain);
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain);
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain);
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref);
void intel_display_power_flush_work(struct drm_i915_private *i915);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref);
static inline void
intel_display_power_put_async(struct drm_i915_private *i915,
			      enum intel_display_power_domain domain,
			      intel_wakeref_t wakeref)
{
	__intel_display_power_put_async(i915, domain, wakeref);
}
#else
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain);

static inline void
intel_display_power_put(struct drm_i915_private *i915,
			enum intel_display_power_domain domain,
			intel_wakeref_t wakeref)
{
	intel_display_power_put_unchecked(i915, domain);
}

static inline void
intel_display_power_put_async(struct drm_i915_private *i915,
			      enum intel_display_power_domain domain,
			      intel_wakeref_t wakeref)
{
	__intel_display_power_put_async(i915, domain, -1);
}
#endif
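
/*
 * Typical get/put usage (a sketch, not part of this header): the wakeref
 * returned by intel_display_power_get*() must be passed back to the matching
 * put:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_PIPE_A);
 *	if (!wakeref)
 *		return;
 *
 *	... access the hardware behind the domain ...
 *
 *	intel_display_power_put(i915, POWER_DOMAIN_PIPE_A, wakeref);
 */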

void
intel_display_power_get_in_set(struct drm_i915_private *i915,
			       struct intel_display_power_domain_set *power_domain_set,
			       enum intel_display_power_domain domain);

bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
					  struct intel_display_power_domain_set *power_domain_set,
					  enum intel_display_power_domain domain);

void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
				    struct intel_display_power_domain_set *power_domain_set,
				    u64 mask);

static inline void
intel_display_power_put_all_in_set(struct drm_i915_private *i915,
				   struct intel_display_power_domain_set *power_domain_set)
{
	intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask);
}
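
/*
 * Sketch of the power_domain_set helpers above (illustrative only): collect
 * references for several domains, then drop them all at once:
 *
 *	struct intel_display_power_domain_set power_domain_set = { };
 *
 *	intel_display_power_get_in_set(i915, &power_domain_set,
 *				       POWER_DOMAIN_PIPE_A);
 *	intel_display_power_get_in_set(i915, &power_domain_set,
 *				       POWER_DOMAIN_TRANSCODER_A);
 *
 *	... use the hardware ...
 *
 *	intel_display_power_put_all_in_set(i915, &power_domain_set);
 */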

/*
 * FIXME: We should probably switch this to a 0-based scheme to be consistent
 * with how we now name/number DBUF_CTL instances.
 */
enum dbuf_slice {
	DBUF_S1,
	DBUF_S2,
	DBUF_S3,
	DBUF_S4,
	I915_MAX_DBUF_SLICES
};

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices);
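
/*
 * req_slices is a bitmask of enum dbuf_slice values (a sketch only, assuming
 * the caller builds the mask with BIT()); enabling the first two slices would
 * then look like:
 *
 *	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | BIT(DBUF_S2));
 */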

#define with_intel_display_power(i915, domain, wf) \
	for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
	     intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
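
/*
 * Illustrative use of with_intel_display_power() (a sketch, not part of this
 * header; example_program_audio() is a hypothetical helper): the body runs
 * with a reference on the domain, and the reference is released
 * asynchronously when the block is left:
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_intel_display_power(i915, POWER_DOMAIN_AUDIO_PLAYBACK, wakeref)
 *		example_program_audio(i915);
 */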

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask);
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override);

#endif /* __INTEL_DISPLAY_POWER_H__ */