1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "display/intel_crt.h"
7 #include "display/intel_dp.h"
8 
9 #include "i915_drv.h"
10 #include "i915_irq.h"
11 #include "intel_cdclk.h"
12 #include "intel_combo_phy.h"
13 #include "intel_csr.h"
14 #include "intel_display_power.h"
15 #include "intel_display_types.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_hotplug.h"
18 #include "intel_pm.h"
19 #include "intel_sideband.h"
20 #include "intel_tc.h"
21 #include "intel_vga.h"
22 
23 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
24 					 enum i915_power_well_id power_well_id);
25 
26 const char *
27 intel_display_power_domain_str(enum intel_display_power_domain domain)
28 {
29 	switch (domain) {
30 	case POWER_DOMAIN_DISPLAY_CORE:
31 		return "DISPLAY_CORE";
32 	case POWER_DOMAIN_PIPE_A:
33 		return "PIPE_A";
34 	case POWER_DOMAIN_PIPE_B:
35 		return "PIPE_B";
36 	case POWER_DOMAIN_PIPE_C:
37 		return "PIPE_C";
38 	case POWER_DOMAIN_PIPE_D:
39 		return "PIPE_D";
40 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
41 		return "PIPE_A_PANEL_FITTER";
42 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
43 		return "PIPE_B_PANEL_FITTER";
44 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
45 		return "PIPE_C_PANEL_FITTER";
46 	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
47 		return "PIPE_D_PANEL_FITTER";
48 	case POWER_DOMAIN_TRANSCODER_A:
49 		return "TRANSCODER_A";
50 	case POWER_DOMAIN_TRANSCODER_B:
51 		return "TRANSCODER_B";
52 	case POWER_DOMAIN_TRANSCODER_C:
53 		return "TRANSCODER_C";
54 	case POWER_DOMAIN_TRANSCODER_D:
55 		return "TRANSCODER_D";
56 	case POWER_DOMAIN_TRANSCODER_EDP:
57 		return "TRANSCODER_EDP";
58 	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
59 		return "TRANSCODER_VDSC_PW2";
60 	case POWER_DOMAIN_TRANSCODER_DSI_A:
61 		return "TRANSCODER_DSI_A";
62 	case POWER_DOMAIN_TRANSCODER_DSI_C:
63 		return "TRANSCODER_DSI_C";
64 	case POWER_DOMAIN_PORT_DDI_A_LANES:
65 		return "PORT_DDI_A_LANES";
66 	case POWER_DOMAIN_PORT_DDI_B_LANES:
67 		return "PORT_DDI_B_LANES";
68 	case POWER_DOMAIN_PORT_DDI_C_LANES:
69 		return "PORT_DDI_C_LANES";
70 	case POWER_DOMAIN_PORT_DDI_D_LANES:
71 		return "PORT_DDI_D_LANES";
72 	case POWER_DOMAIN_PORT_DDI_E_LANES:
73 		return "PORT_DDI_E_LANES";
74 	case POWER_DOMAIN_PORT_DDI_F_LANES:
75 		return "PORT_DDI_F_LANES";
76 	case POWER_DOMAIN_PORT_DDI_G_LANES:
77 		return "PORT_DDI_G_LANES";
78 	case POWER_DOMAIN_PORT_DDI_H_LANES:
79 		return "PORT_DDI_H_LANES";
80 	case POWER_DOMAIN_PORT_DDI_I_LANES:
81 		return "PORT_DDI_I_LANES";
82 	case POWER_DOMAIN_PORT_DDI_A_IO:
83 		return "PORT_DDI_A_IO";
84 	case POWER_DOMAIN_PORT_DDI_B_IO:
85 		return "PORT_DDI_B_IO";
86 	case POWER_DOMAIN_PORT_DDI_C_IO:
87 		return "PORT_DDI_C_IO";
88 	case POWER_DOMAIN_PORT_DDI_D_IO:
89 		return "PORT_DDI_D_IO";
90 	case POWER_DOMAIN_PORT_DDI_E_IO:
91 		return "PORT_DDI_E_IO";
92 	case POWER_DOMAIN_PORT_DDI_F_IO:
93 		return "PORT_DDI_F_IO";
94 	case POWER_DOMAIN_PORT_DDI_G_IO:
95 		return "PORT_DDI_G_IO";
96 	case POWER_DOMAIN_PORT_DDI_H_IO:
97 		return "PORT_DDI_H_IO";
98 	case POWER_DOMAIN_PORT_DDI_I_IO:
99 		return "PORT_DDI_I_IO";
100 	case POWER_DOMAIN_PORT_DSI:
101 		return "PORT_DSI";
102 	case POWER_DOMAIN_PORT_CRT:
103 		return "PORT_CRT";
104 	case POWER_DOMAIN_PORT_OTHER:
105 		return "PORT_OTHER";
106 	case POWER_DOMAIN_VGA:
107 		return "VGA";
108 	case POWER_DOMAIN_AUDIO:
109 		return "AUDIO";
110 	case POWER_DOMAIN_AUX_A:
111 		return "AUX_A";
112 	case POWER_DOMAIN_AUX_B:
113 		return "AUX_B";
114 	case POWER_DOMAIN_AUX_C:
115 		return "AUX_C";
116 	case POWER_DOMAIN_AUX_D:
117 		return "AUX_D";
118 	case POWER_DOMAIN_AUX_E:
119 		return "AUX_E";
120 	case POWER_DOMAIN_AUX_F:
121 		return "AUX_F";
122 	case POWER_DOMAIN_AUX_G:
123 		return "AUX_G";
124 	case POWER_DOMAIN_AUX_H:
125 		return "AUX_H";
126 	case POWER_DOMAIN_AUX_I:
127 		return "AUX_I";
128 	case POWER_DOMAIN_AUX_IO_A:
129 		return "AUX_IO_A";
130 	case POWER_DOMAIN_AUX_C_TBT:
131 		return "AUX_C_TBT";
132 	case POWER_DOMAIN_AUX_D_TBT:
133 		return "AUX_D_TBT";
134 	case POWER_DOMAIN_AUX_E_TBT:
135 		return "AUX_E_TBT";
136 	case POWER_DOMAIN_AUX_F_TBT:
137 		return "AUX_F_TBT";
138 	case POWER_DOMAIN_AUX_G_TBT:
139 		return "AUX_G_TBT";
140 	case POWER_DOMAIN_AUX_H_TBT:
141 		return "AUX_H_TBT";
142 	case POWER_DOMAIN_AUX_I_TBT:
143 		return "AUX_I_TBT";
144 	case POWER_DOMAIN_GMBUS:
145 		return "GMBUS";
146 	case POWER_DOMAIN_INIT:
147 		return "INIT";
148 	case POWER_DOMAIN_MODESET:
149 		return "MODESET";
150 	case POWER_DOMAIN_GT_IRQ:
151 		return "GT_IRQ";
152 	case POWER_DOMAIN_DPLL_DC_OFF:
153 		return "DPLL_DC_OFF";
154 	default:
155 		MISSING_CASE(domain);
156 		return "?";
157 	}
158 }
159 
160 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
161 				    struct i915_power_well *power_well)
162 {
163 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
164 	power_well->desc->ops->enable(dev_priv, power_well);
165 	power_well->hw_enabled = true;
166 }
167 
168 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
169 				     struct i915_power_well *power_well)
170 {
171 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
172 	power_well->hw_enabled = false;
173 	power_well->desc->ops->disable(dev_priv, power_well);
174 }
175 
176 static void intel_power_well_get(struct drm_i915_private *dev_priv,
177 				 struct i915_power_well *power_well)
178 {
179 	if (!power_well->count++)
180 		intel_power_well_enable(dev_priv, power_well);
181 }
182 
183 static void intel_power_well_put(struct drm_i915_private *dev_priv,
184 				 struct i915_power_well *power_well)
185 {
186 	drm_WARN(&dev_priv->drm, !power_well->count,
187 		 "Use count on power well %s is already zero",
188 		 power_well->desc->name);
189 
190 	if (!--power_well->count)
191 		intel_power_well_disable(dev_priv, power_well);
192 }
193 
194 /**
195  * __intel_display_power_is_enabled - unlocked check for a power domain
196  * @dev_priv: i915 device instance
197  * @domain: power domain to check
198  *
199  * This is the unlocked version of intel_display_power_is_enabled() and should
200  * only be used from error capture and recovery code where deadlocks are
201  * possible.
202  *
203  * Returns:
204  * True when the power domain is enabled, false otherwise.
205  */
206 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
207 				      enum intel_display_power_domain domain)
208 {
209 	struct i915_power_well *power_well;
210 	bool is_enabled;
211 
212 	if (dev_priv->runtime_pm.suspended)
213 		return false;
214 
215 	is_enabled = true;
216 
217 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
218 		if (power_well->desc->always_on)
219 			continue;
220 
221 		if (!power_well->hw_enabled) {
222 			is_enabled = false;
223 			break;
224 		}
225 	}
226 
227 	return is_enabled;
228 }
229 
230 /**
231  * intel_display_power_is_enabled - check for a power domain
232  * @dev_priv: i915 device instance
233  * @domain: power domain to check
234  *
235  * This function can be used to check the hw power domain state. It is mostly
236  * used in hardware state readout functions. Everywhere else code should rely
237  * upon explicit power domain reference counting to ensure that the hardware
238  * block is powered up before accessing it.
239  *
240  * Callers must hold the relevant modesetting locks to ensure that concurrent
241  * threads can't disable the power well while the caller tries to read a few
242  * registers.
243  *
244  * Returns:
245  * True when the power domain is enabled, false otherwise.
246  */
247 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
248 				    enum intel_display_power_domain domain)
249 {
250 	struct i915_power_domains *power_domains;
251 	bool ret;
252 
253 	power_domains = &dev_priv->power_domains;
254 
255 	mutex_lock(&power_domains->lock);
256 	ret = __intel_display_power_is_enabled(dev_priv, domain);
257 	mutex_unlock(&power_domains->lock);
258 
259 	return ret;
260 }
261 
262 /*
263  * Starting with Haswell, we have a "Power Down Well" that can be turned off
264  * when not needed anymore. We have 4 registers that can request the power well
265  * to be enabled, and it will only be disabled if none of the registers is
266  * requesting it to be enabled.
267  */
268 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
269 				       u8 irq_pipe_mask, bool has_vga)
270 {
271 	if (has_vga)
272 		intel_vga_reset_io_mem(dev_priv);
273 
274 	if (irq_pipe_mask)
275 		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
276 }
277 
278 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
279 				       u8 irq_pipe_mask)
280 {
281 	if (irq_pipe_mask)
282 		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
283 }
284 
285 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
286 					   struct i915_power_well *power_well)
287 {
288 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
289 	int pw_idx = power_well->desc->hsw.idx;
290 
291 	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
292 	if (intel_de_wait_for_set(dev_priv, regs->driver,
293 				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
294 		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
295 			    power_well->desc->name);
296 
297 		/* An AUX timeout is expected if the TBT DP tunnel is down. */
298 		drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
299 	}
300 }
301 
302 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
303 				     const struct i915_power_well_regs *regs,
304 				     int pw_idx)
305 {
306 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
307 	u32 ret;
308 
309 	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
310 	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
311 	if (regs->kvmr.reg)
312 		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
313 	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
314 
315 	return ret;
316 }
317 
318 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
319 					    struct i915_power_well *power_well)
320 {
321 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
322 	int pw_idx = power_well->desc->hsw.idx;
323 	bool disabled;
324 	u32 reqs;
325 
326 	/*
327 	 * Bspec doesn't require waiting for PWs to get disabled, but still do
328 	 * this for paranoia. The known cases where a PW will be forced on:
329 	 * - a KVMR request on any power well via the KVMR request register
330 	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
331 	 *   DEBUG request registers
332 	 * Skip the wait in case any of the request bits are set and print a
333 	 * diagnostic message.
334 	 */
335 	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
336 			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
337 		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
338 	if (disabled)
339 		return;
340 
341 	drm_dbg_kms(&dev_priv->drm,
342 		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
343 		    power_well->desc->name,
344 		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
345 }
346 
347 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
348 					   enum skl_power_gate pg)
349 {
350 	/* Timeout 5us for PG#0, for other PGs 1us */
351 	drm_WARN_ON(&dev_priv->drm,
352 		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
353 					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
354 }
355 
356 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
357 				  struct i915_power_well *power_well)
358 {
359 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
360 	int pw_idx = power_well->desc->hsw.idx;
361 	bool wait_fuses = power_well->desc->hsw.has_fuses;
362 	enum skl_power_gate uninitialized_var(pg);
363 	u32 val;
364 
365 	if (wait_fuses) {
366 		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
367 						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
368 		/*
369 		 * For PW1 we have to wait both for the PW0/PG0 fuse state
370 		 * before enabling the power well and PW1/PG1's own fuse
371 		 * state after the enabling. For all other power wells with
372 		 * fuses we only have to wait for that PW/PG's fuse state
373 		 * after the enabling.
374 		 */
375 		if (pg == SKL_PG1)
376 			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
377 	}
378 
379 	val = intel_de_read(dev_priv, regs->driver);
380 	intel_de_write(dev_priv, regs->driver,
381 		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
382 	hsw_wait_for_power_well_enable(dev_priv, power_well);
383 
384 	/* Display WA #1178: cnl */
385 	if (IS_CANNONLAKE(dev_priv) &&
386 	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
387 	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
388 		val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
389 		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
390 		intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
391 	}
392 
393 	if (wait_fuses)
394 		gen9_wait_for_power_well_fuses(dev_priv, pg);
395 
396 	hsw_power_well_post_enable(dev_priv,
397 				   power_well->desc->hsw.irq_pipe_mask,
398 				   power_well->desc->hsw.has_vga);
399 }
400 
401 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
402 				   struct i915_power_well *power_well)
403 {
404 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
405 	int pw_idx = power_well->desc->hsw.idx;
406 	u32 val;
407 
408 	hsw_power_well_pre_disable(dev_priv,
409 				   power_well->desc->hsw.irq_pipe_mask);
410 
411 	val = intel_de_read(dev_priv, regs->driver);
412 	intel_de_write(dev_priv, regs->driver,
413 		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
414 	hsw_wait_for_power_well_disable(dev_priv, power_well);
415 }
416 
417 #define ICL_AUX_PW_TO_PHY(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
418 
419 static void
420 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
421 				    struct i915_power_well *power_well)
422 {
423 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
424 	int pw_idx = power_well->desc->hsw.idx;
425 	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
426 	u32 val;
427 
428 	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
429 
430 	val = intel_de_read(dev_priv, regs->driver);
431 	intel_de_write(dev_priv, regs->driver,
432 		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
433 
434 	if (INTEL_GEN(dev_priv) < 12) {
435 		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
436 		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
437 			       val | ICL_LANE_ENABLE_AUX);
438 	}
439 
440 	hsw_wait_for_power_well_enable(dev_priv, power_well);
441 
442 	/* Display WA #1178: icl */
443 	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
444 	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
445 		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
446 		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
447 		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
448 	}
449 }
450 
451 static void
452 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
453 				     struct i915_power_well *power_well)
454 {
455 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
456 	int pw_idx = power_well->desc->hsw.idx;
457 	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
458 	u32 val;
459 
460 	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
461 
462 	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
463 	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
464 		       val & ~ICL_LANE_ENABLE_AUX);
465 
466 	val = intel_de_read(dev_priv, regs->driver);
467 	intel_de_write(dev_priv, regs->driver,
468 		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
469 
470 	hsw_wait_for_power_well_disable(dev_priv, power_well);
471 }
472 
473 #define ICL_AUX_PW_TO_CH(pw_idx)	\
474 	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
475 
476 #define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
477 	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
478 
479 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
480 				     struct i915_power_well *power_well)
481 {
482 	int pw_idx = power_well->desc->hsw.idx;
483 
484 	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
485 						 ICL_AUX_PW_TO_CH(pw_idx);
486 }
487 
488 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
489 
490 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
491 
492 static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
493 				      struct i915_power_well *power_well)
494 {
495 	int refs = hweight64(power_well->desc->domains &
496 			     async_put_domains_mask(&dev_priv->power_domains));
497 
498 	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
499 
500 	return refs;
501 }
502 
503 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
504 					struct i915_power_well *power_well)
505 {
506 	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
507 	struct intel_digital_port *dig_port = NULL;
508 	struct intel_encoder *encoder;
509 
510 	/* Bypass the check if all references are released asynchronously */
511 	if (power_well_async_ref_count(dev_priv, power_well) ==
512 	    power_well->count)
513 		return;
514 
515 	aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
516 
517 	for_each_intel_encoder(&dev_priv->drm, encoder) {
518 		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
519 
520 		if (!intel_phy_is_tc(dev_priv, phy))
521 			continue;
522 
523 		/* We'll check the MST primary port */
524 		if (encoder->type == INTEL_OUTPUT_DP_MST)
525 			continue;
526 
527 		dig_port = enc_to_dig_port(encoder);
528 		if (drm_WARN_ON(&dev_priv->drm, !dig_port))
529 			continue;
530 
531 		if (dig_port->aux_ch != aux_ch) {
532 			dig_port = NULL;
533 			continue;
534 		}
535 
536 		break;
537 	}
538 
539 	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
540 		return;
541 
542 	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
543 }
544 
545 #else
546 
547 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
548 					struct i915_power_well *power_well)
549 {
550 }
551 
552 #endif
553 
554 #define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
555 
556 static void
557 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
558 				 struct i915_power_well *power_well)
559 {
560 	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
561 	u32 val;
562 
563 	icl_tc_port_assert_ref_held(dev_priv, power_well);
564 
565 	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
566 	val &= ~DP_AUX_CH_CTL_TBT_IO;
567 	if (power_well->desc->hsw.is_tc_tbt)
568 		val |= DP_AUX_CH_CTL_TBT_IO;
569 	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
570 
571 	hsw_power_well_enable(dev_priv, power_well);
572 
573 	if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
574 		enum tc_port tc_port;
575 
576 		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
577 		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
578 			       HIP_INDEX_VAL(tc_port, 0x2));
579 
580 		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
581 					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
582 			drm_warn(&dev_priv->drm,
583 				 "Timeout waiting TC uC health\n");
584 	}
585 }
586 
587 static void
588 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
589 				  struct i915_power_well *power_well)
590 {
591 	icl_tc_port_assert_ref_held(dev_priv, power_well);
592 
593 	hsw_power_well_disable(dev_priv, power_well);
594 }
595 
596 /*
597  * We should only use the power well if we explicitly asked the hardware to
598  * enable it, so check if it's enabled and also check if we've requested it to
599  * be enabled.
600  */
601 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
602 				   struct i915_power_well *power_well)
603 {
604 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
605 	enum i915_power_well_id id = power_well->desc->id;
606 	int pw_idx = power_well->desc->hsw.idx;
607 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
608 		   HSW_PWR_WELL_CTL_STATE(pw_idx);
609 	u32 val;
610 
611 	val = intel_de_read(dev_priv, regs->driver);
612 
613 	/*
614 	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
615 	 * and the MISC_IO PW will be not restored, so check instead for the
616 	 * BIOS's own request bits, which are forced-on for these power wells
617 	 * when exiting DC5/6.
618 	 */
619 	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
620 	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
621 		val |= intel_de_read(dev_priv, regs->bios);
622 
623 	return (val & mask) == mask;
624 }
625 
626 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
627 {
628 	drm_WARN_ONCE(&dev_priv->drm,
629 		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
630 		      "DC9 already programmed to be enabled.\n");
631 	drm_WARN_ONCE(&dev_priv->drm,
632 		      intel_de_read(dev_priv, DC_STATE_EN) &
633 		      DC_STATE_EN_UPTO_DC5,
634 		      "DC5 still not disabled to enable DC9.\n");
635 	drm_WARN_ONCE(&dev_priv->drm,
636 		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
637 		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
638 		      "Power well 2 on.\n");
639 	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
640 		      "Interrupts not disabled yet.\n");
641 
642 	 /*
643 	  * TODO: check for the following to verify the conditions to enter DC9
644 	  * state are satisfied:
645 	  * 1] Check relevant display engine registers to verify if mode set
646 	  * disable sequence was followed.
647 	  * 2] Check if display uninitialize sequence is initialized.
648 	  */
649 }
650 
651 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
652 {
653 	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
654 		      "Interrupts not disabled yet.\n");
655 	drm_WARN_ONCE(&dev_priv->drm,
656 		      intel_de_read(dev_priv, DC_STATE_EN) &
657 		      DC_STATE_EN_UPTO_DC5,
658 		      "DC5 still not disabled.\n");
659 
660 	 /*
661 	  * TODO: check for the following to verify DC9 state was indeed
662 	  * entered before programming to disable it:
663 	  * 1] Check relevant display engine registers to verify if mode
664 	  *  set disable sequence was followed.
665 	  * 2] Check if display uninitialize sequence is initialized.
666 	  */
667 }
668 
669 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
670 				u32 state)
671 {
672 	int rewrites = 0;
673 	int rereads = 0;
674 	u32 v;
675 
676 	intel_de_write(dev_priv, DC_STATE_EN, state);
677 
678 	/* It has been observed that disabling the dc6 state sometimes
679 	 * doesn't stick and dmc keeps returning old value. Make sure
680 	 * the write really sticks enough times and also force rewrite until
681 	 * we are confident that state is exactly what we want.
682 	 */
683 	do  {
684 		v = intel_de_read(dev_priv, DC_STATE_EN);
685 
686 		if (v != state) {
687 			intel_de_write(dev_priv, DC_STATE_EN, state);
688 			rewrites++;
689 			rereads = 0;
690 		} else if (rereads++ > 5) {
691 			break;
692 		}
693 
694 	} while (rewrites < 100);
695 
696 	if (v != state)
697 		drm_err(&dev_priv->drm,
698 			"Writing dc state to 0x%x failed, now 0x%x\n",
699 			state, v);
700 
701 	/* Most of the times we need one retry, avoid spam */
702 	if (rewrites > 1)
703 		drm_dbg_kms(&dev_priv->drm,
704 			    "Rewrote dc state to 0x%x %d times\n",
705 			    state, rewrites);
706 }
707 
708 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
709 {
710 	u32 mask;
711 
712 	mask = DC_STATE_EN_UPTO_DC5;
713 
714 	if (INTEL_GEN(dev_priv) >= 12)
715 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
716 					  | DC_STATE_EN_DC9;
717 	else if (IS_GEN(dev_priv, 11))
718 		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
719 	else if (IS_GEN9_LP(dev_priv))
720 		mask |= DC_STATE_EN_DC9;
721 	else
722 		mask |= DC_STATE_EN_UPTO_DC6;
723 
724 	return mask;
725 }
726 
727 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
728 {
729 	u32 val;
730 
731 	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
732 
733 	drm_dbg_kms(&dev_priv->drm,
734 		    "Resetting DC state tracking from %02x to %02x\n",
735 		    dev_priv->csr.dc_state, val);
736 	dev_priv->csr.dc_state = val;
737 }
738 
739 /**
740  * gen9_set_dc_state - set target display C power state
741  * @dev_priv: i915 device instance
742  * @state: target DC power state
743  * - DC_STATE_DISABLE
744  * - DC_STATE_EN_UPTO_DC5
745  * - DC_STATE_EN_UPTO_DC6
746  * - DC_STATE_EN_DC9
747  *
748  * Signal to DMC firmware/HW the target DC power state passed in @state.
749  * DMC/HW can turn off individual display clocks and power rails when entering
750  * a deeper DC power state (higher in number) and turns these back when exiting
751  * that state to a shallower power state (lower in number). The HW will decide
752  * when to actually enter a given state on an on-demand basis, for instance
753  * depending on the active state of display pipes. The state of display
754  * registers backed by affected power rails are saved/restored as needed.
755  *
756  * Based on the above enabling a deeper DC power state is asynchronous wrt.
757  * enabling it. Disabling a deeper power state is synchronous: for instance
758  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
759  * back on and register state is restored. This is guaranteed by the MMIO write
760  * to DC_STATE_EN blocking until the state is restored.
761  */
762 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
763 {
764 	u32 val;
765 	u32 mask;
766 
767 	if (drm_WARN_ON_ONCE(&dev_priv->drm,
768 			     state & ~dev_priv->csr.allowed_dc_mask))
769 		state &= dev_priv->csr.allowed_dc_mask;
770 
771 	val = intel_de_read(dev_priv, DC_STATE_EN);
772 	mask = gen9_dc_mask(dev_priv);
773 	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
774 		    val & mask, state);
775 
776 	/* Check if DMC is ignoring our DC state requests */
777 	if ((val & mask) != dev_priv->csr.dc_state)
778 		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
779 			dev_priv->csr.dc_state, val & mask);
780 
781 	val &= ~mask;
782 	val |= state;
783 
784 	gen9_write_dc_state(dev_priv, val);
785 
786 	dev_priv->csr.dc_state = val & mask;
787 }
788 
789 static u32
790 sanitize_target_dc_state(struct drm_i915_private *dev_priv,
791 			 u32 target_dc_state)
792 {
793 	u32 states[] = {
794 		DC_STATE_EN_UPTO_DC6,
795 		DC_STATE_EN_UPTO_DC5,
796 		DC_STATE_EN_DC3CO,
797 		DC_STATE_DISABLE,
798 	};
799 	int i;
800 
801 	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
802 		if (target_dc_state != states[i])
803 			continue;
804 
805 		if (dev_priv->csr.allowed_dc_mask & target_dc_state)
806 			break;
807 
808 		target_dc_state = states[i + 1];
809 	}
810 
811 	return target_dc_state;
812 }
813 
814 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
815 {
816 	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
817 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
818 }
819 
820 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
821 {
822 	u32 val;
823 
824 	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
825 	val = intel_de_read(dev_priv, DC_STATE_EN);
826 	val &= ~DC_STATE_DC3CO_STATUS;
827 	intel_de_write(dev_priv, DC_STATE_EN, val);
828 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
829 	/*
830 	 * Delay of 200us DC3CO Exit time B.Spec 49196
831 	 */
832 	usleep_range(200, 210);
833 }
834 
835 static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
836 {
837 	assert_can_enable_dc9(dev_priv);
838 
839 	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
840 	/*
841 	 * Power sequencer reset is not needed on
842 	 * platforms with South Display Engine on PCH,
843 	 * because PPS registers are always on.
844 	 */
845 	if (!HAS_PCH_SPLIT(dev_priv))
846 		intel_power_sequencer_reset(dev_priv);
847 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
848 }
849 
850 static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
851 {
852 	assert_can_disable_dc9(dev_priv);
853 
854 	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
855 
856 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
857 
858 	intel_pps_unlock_regs_wa(dev_priv);
859 }
860 
861 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
862 {
863 	drm_WARN_ONCE(&dev_priv->drm,
864 		      !intel_de_read(dev_priv, CSR_PROGRAM(0)),
865 		      "CSR program storage start is NULL\n");
866 	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
867 		      "CSR SSP Base Not fine\n");
868 	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
869 		      "CSR HTP Not fine\n");
870 }
871 
872 static struct i915_power_well *
873 lookup_power_well(struct drm_i915_private *dev_priv,
874 		  enum i915_power_well_id power_well_id)
875 {
876 	struct i915_power_well *power_well;
877 
878 	for_each_power_well(dev_priv, power_well)
879 		if (power_well->desc->id == power_well_id)
880 			return power_well;
881 
882 	/*
883 	 * It's not feasible to add error checking code to the callers since
884 	 * this condition really shouldn't happen and it doesn't even make sense
885 	 * to abort things like display initialization sequences. Just return
886 	 * the first power well and hope the WARN gets reported so we can fix
887 	 * our driver.
888 	 */
889 	drm_WARN(&dev_priv->drm, 1,
890 		 "Power well %d not defined for this platform\n",
891 		 power_well_id);
892 	return &dev_priv->power_domains.power_wells[0];
893 }
894 
895 /**
896  * intel_display_power_set_target_dc_state - Set target dc state.
897  * @dev_priv: i915 device
898  * @state: state which needs to be set as target_dc_state.
899  *
900  * This function set the "DC off" power well target_dc_state,
901  * based upon this target_dc_stste, "DC off" power well will
902  * enable desired DC state.
903  */
904 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
905 					     u32 state)
906 {
907 	struct i915_power_well *power_well;
908 	bool dc_off_enabled;
909 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
910 
911 	mutex_lock(&power_domains->lock);
912 	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
913 
914 	if (drm_WARN_ON(&dev_priv->drm, !power_well))
915 		goto unlock;
916 
917 	state = sanitize_target_dc_state(dev_priv, state);
918 
919 	if (state == dev_priv->csr.target_dc_state)
920 		goto unlock;
921 
922 	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
923 							   power_well);
924 	/*
925 	 * If DC off power well is disabled, need to enable and disable the
926 	 * DC off power well to effect target DC state.
927 	 */
928 	if (!dc_off_enabled)
929 		power_well->desc->ops->enable(dev_priv, power_well);
930 
931 	dev_priv->csr.target_dc_state = state;
932 
933 	if (!dc_off_enabled)
934 		power_well->desc->ops->disable(dev_priv, power_well);
935 
936 unlock:
937 	mutex_unlock(&power_domains->lock);
938 }
939 
940 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
941 {
942 	enum i915_power_well_id high_pg;
943 
944 	/* Power wells at this level and above must be disabled for DC5 entry */
945 	if (INTEL_GEN(dev_priv) >= 12)
946 		high_pg = TGL_DISP_PW_3;
947 	else
948 		high_pg = SKL_DISP_PW_2;
949 
950 	drm_WARN_ONCE(&dev_priv->drm,
951 		      intel_display_power_well_is_enabled(dev_priv, high_pg),
952 		      "Power wells above platform's DC5 limit still enabled.\n");
953 
954 	drm_WARN_ONCE(&dev_priv->drm,
955 		      (intel_de_read(dev_priv, DC_STATE_EN) &
956 		       DC_STATE_EN_UPTO_DC5),
957 		      "DC5 already programmed to be enabled.\n");
958 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
959 
960 	assert_csr_loaded(dev_priv);
961 }
962 
963 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
964 {
965 	assert_can_enable_dc5(dev_priv);
966 
967 	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
968 
969 	/* Wa Display #1183: skl,kbl,cfl */
970 	if (IS_GEN9_BC(dev_priv))
971 		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
972 			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
973 
974 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
975 }
976 
977 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
978 {
979 	drm_WARN_ONCE(&dev_priv->drm,
980 		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
981 		      "Backlight is not disabled.\n");
982 	drm_WARN_ONCE(&dev_priv->drm,
983 		      (intel_de_read(dev_priv, DC_STATE_EN) &
984 		       DC_STATE_EN_UPTO_DC6),
985 		      "DC6 already programmed to be enabled.\n");
986 
987 	assert_csr_loaded(dev_priv);
988 }
989 
990 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
991 {
992 	assert_can_enable_dc6(dev_priv);
993 
994 	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
995 
996 	/* Wa Display #1183: skl,kbl,cfl */
997 	if (IS_GEN9_BC(dev_priv))
998 		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
999 			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1000 
1001 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1002 }
1003 
1004 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1005 				   struct i915_power_well *power_well)
1006 {
1007 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1008 	int pw_idx = power_well->desc->hsw.idx;
1009 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1010 	u32 bios_req = intel_de_read(dev_priv, regs->bios);
1011 
1012 	/* Take over the request bit if set by BIOS. */
1013 	if (bios_req & mask) {
1014 		u32 drv_req = intel_de_read(dev_priv, regs->driver);
1015 
1016 		if (!(drv_req & mask))
1017 			intel_de_write(dev_priv, regs->driver, drv_req | mask);
1018 		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1019 	}
1020 }
1021 
1022 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1023 					   struct i915_power_well *power_well)
1024 {
1025 	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1026 }
1027 
1028 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1029 					    struct i915_power_well *power_well)
1030 {
1031 	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1032 }
1033 
1034 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1035 					    struct i915_power_well *power_well)
1036 {
1037 	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1038 }
1039 
1040 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1041 {
1042 	struct i915_power_well *power_well;
1043 
1044 	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1045 	if (power_well->count > 0)
1046 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1047 
1048 	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1049 	if (power_well->count > 0)
1050 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1051 
1052 	if (IS_GEMINILAKE(dev_priv)) {
1053 		power_well = lookup_power_well(dev_priv,
1054 					       GLK_DISP_PW_DPIO_CMN_C);
1055 		if (power_well->count > 0)
1056 			bxt_ddi_phy_verify_state(dev_priv,
1057 						 power_well->desc->bxt.phy);
1058 	}
1059 }
1060 
1061 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1062 					   struct i915_power_well *power_well)
1063 {
1064 	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1065 		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1066 }
1067 
1068 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1069 {
1070 	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1071 	u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
1072 
1073 	drm_WARN(&dev_priv->drm,
1074 		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
1075 		 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
1076 		 hw_enabled_dbuf_slices,
1077 		 enabled_dbuf_slices);
1078 }
1079 
1080 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1081 {
1082 	struct intel_cdclk_config cdclk_config = {};
1083 
1084 	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
1085 		tgl_disable_dc3co(dev_priv);
1086 		return;
1087 	}
1088 
1089 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1090 
1091 	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
1092 	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
1093 	drm_WARN_ON(&dev_priv->drm,
1094 		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
1095 					      &cdclk_config));
1096 
1097 	gen9_assert_dbuf_enabled(dev_priv);
1098 
1099 	if (IS_GEN9_LP(dev_priv))
1100 		bxt_verify_ddi_phy_power_wells(dev_priv);
1101 
1102 	if (INTEL_GEN(dev_priv) >= 11)
1103 		/*
1104 		 * DMC retains HW context only for port A, the other combo
1105 		 * PHY's HW context for port B is lost after DC transitions,
1106 		 * so we need to restore it manually.
1107 		 */
1108 		intel_combo_phy_init(dev_priv);
1109 }
1110 
1111 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1112 					  struct i915_power_well *power_well)
1113 {
1114 	gen9_disable_dc_states(dev_priv);
1115 }
1116 
1117 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1118 					   struct i915_power_well *power_well)
1119 {
1120 	if (!dev_priv->csr.dmc_payload)
1121 		return;
1122 
1123 	switch (dev_priv->csr.target_dc_state) {
1124 	case DC_STATE_EN_DC3CO:
1125 		tgl_enable_dc3co(dev_priv);
1126 		break;
1127 	case DC_STATE_EN_UPTO_DC6:
1128 		skl_enable_dc6(dev_priv);
1129 		break;
1130 	case DC_STATE_EN_UPTO_DC5:
1131 		gen9_enable_dc5(dev_priv);
1132 		break;
1133 	}
1134 }
1135 
1136 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1137 					 struct i915_power_well *power_well)
1138 {
1139 }
1140 
1141 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1142 					   struct i915_power_well *power_well)
1143 {
1144 }
1145 
1146 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1147 					     struct i915_power_well *power_well)
1148 {
1149 	return true;
1150 }
1151 
1152 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1153 					 struct i915_power_well *power_well)
1154 {
1155 	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1156 		i830_enable_pipe(dev_priv, PIPE_A);
1157 	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1158 		i830_enable_pipe(dev_priv, PIPE_B);
1159 }
1160 
1161 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1162 					  struct i915_power_well *power_well)
1163 {
1164 	i830_disable_pipe(dev_priv, PIPE_B);
1165 	i830_disable_pipe(dev_priv, PIPE_A);
1166 }
1167 
1168 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1169 					  struct i915_power_well *power_well)
1170 {
1171 	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1172 		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1173 }
1174 
1175 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1176 					  struct i915_power_well *power_well)
1177 {
1178 	if (power_well->count > 0)
1179 		i830_pipes_power_well_enable(dev_priv, power_well);
1180 	else
1181 		i830_pipes_power_well_disable(dev_priv, power_well);
1182 }
1183 
1184 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1185 			       struct i915_power_well *power_well, bool enable)
1186 {
1187 	int pw_idx = power_well->desc->vlv.idx;
1188 	u32 mask;
1189 	u32 state;
1190 	u32 ctrl;
1191 
1192 	mask = PUNIT_PWRGT_MASK(pw_idx);
1193 	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1194 			 PUNIT_PWRGT_PWR_GATE(pw_idx);
1195 
1196 	vlv_punit_get(dev_priv);
1197 
1198 #define COND \
1199 	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1200 
1201 	if (COND)
1202 		goto out;
1203 
1204 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1205 	ctrl &= ~mask;
1206 	ctrl |= state;
1207 	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1208 
1209 	if (wait_for(COND, 100))
1210 		drm_err(&dev_priv->drm,
1211 			"timeout setting power well state %08x (%08x)\n",
1212 			state,
1213 			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1214 
1215 #undef COND
1216 
1217 out:
1218 	vlv_punit_put(dev_priv);
1219 }
1220 
1221 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1222 				  struct i915_power_well *power_well)
1223 {
1224 	vlv_set_power_well(dev_priv, power_well, true);
1225 }
1226 
1227 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1228 				   struct i915_power_well *power_well)
1229 {
1230 	vlv_set_power_well(dev_priv, power_well, false);
1231 }
1232 
1233 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1234 				   struct i915_power_well *power_well)
1235 {
1236 	int pw_idx = power_well->desc->vlv.idx;
1237 	bool enabled = false;
1238 	u32 mask;
1239 	u32 state;
1240 	u32 ctrl;
1241 
1242 	mask = PUNIT_PWRGT_MASK(pw_idx);
1243 	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1244 
1245 	vlv_punit_get(dev_priv);
1246 
1247 	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1248 	/*
1249 	 * We only ever set the power-on and power-gate states, anything
1250 	 * else is unexpected.
1251 	 */
1252 	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1253 		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1254 	if (state == ctrl)
1255 		enabled = true;
1256 
1257 	/*
1258 	 * A transient state at this point would mean some unexpected party
1259 	 * is poking at the power controls too.
1260 	 */
1261 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1262 	drm_WARN_ON(&dev_priv->drm, ctrl != state);
1263 
1264 	vlv_punit_put(dev_priv);
1265 
1266 	return enabled;
1267 }
1268 
1269 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1270 {
1271 	u32 val;
1272 
1273 	/*
1274 	 * On driver load, a pipe may be active and driving a DSI display.
1275 	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1276 	 * (and never recovering) in this case. intel_dsi_post_disable() will
1277 	 * clear it when we turn off the display.
1278 	 */
1279 	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
1280 	val &= DPOUNIT_CLOCK_GATE_DISABLE;
1281 	val |= VRHUNIT_CLOCK_GATE_DISABLE;
1282 	intel_de_write(dev_priv, DSPCLK_GATE_D, val);
1283 
1284 	/*
1285 	 * Disable trickle feed and enable pnd deadline calculation
1286 	 */
1287 	intel_de_write(dev_priv, MI_ARB_VLV,
1288 		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1289 	intel_de_write(dev_priv, CBR1_VLV, 0);
1290 
1291 	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
1292 	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
1293 		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
1294 					 1000));
1295 }
1296 
1297 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1298 {
1299 	struct intel_encoder *encoder;
1300 	enum pipe pipe;
1301 
1302 	/*
1303 	 * Enable the CRI clock source so we can get at the
1304 	 * display and the reference clock for VGA
1305 	 * hotplug / manual detection. Supposedly DSI also
1306 	 * needs the ref clock up and running.
1307 	 *
1308 	 * CHV DPLL B/C have some issues if VGA mode is enabled.
1309 	 */
1310 	for_each_pipe(dev_priv, pipe) {
1311 		u32 val = intel_de_read(dev_priv, DPLL(pipe));
1312 
1313 		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1314 		if (pipe != PIPE_A)
1315 			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1316 
1317 		intel_de_write(dev_priv, DPLL(pipe), val);
1318 	}
1319 
1320 	vlv_init_display_clock_gating(dev_priv);
1321 
1322 	spin_lock_irq(&dev_priv->irq_lock);
1323 	valleyview_enable_display_irqs(dev_priv);
1324 	spin_unlock_irq(&dev_priv->irq_lock);
1325 
1326 	/*
1327 	 * During driver initialization/resume we can avoid restoring the
1328 	 * part of the HW/SW state that will be inited anyway explicitly.
1329 	 */
1330 	if (dev_priv->power_domains.initializing)
1331 		return;
1332 
1333 	intel_hpd_init(dev_priv);
1334 
1335 	/* Re-enable the ADPA, if we have one */
1336 	for_each_intel_encoder(&dev_priv->drm, encoder) {
1337 		if (encoder->type == INTEL_OUTPUT_ANALOG)
1338 			intel_crt_reset(&encoder->base);
1339 	}
1340 
1341 	intel_vga_redisable_power_on(dev_priv);
1342 
1343 	intel_pps_unlock_regs_wa(dev_priv);
1344 }
1345 
1346 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1347 {
1348 	spin_lock_irq(&dev_priv->irq_lock);
1349 	valleyview_disable_display_irqs(dev_priv);
1350 	spin_unlock_irq(&dev_priv->irq_lock);
1351 
1352 	/* make sure we're done processing display irqs */
1353 	intel_synchronize_irq(dev_priv);
1354 
1355 	intel_power_sequencer_reset(dev_priv);
1356 
1357 	/* Prevent us from re-enabling polling on accident in late suspend */
1358 	if (!dev_priv->drm.dev->power.is_suspended)
1359 		intel_hpd_poll_init(dev_priv);
1360 }
1361 
1362 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1363 					  struct i915_power_well *power_well)
1364 {
1365 	vlv_set_power_well(dev_priv, power_well, true);
1366 
1367 	vlv_display_power_well_init(dev_priv);
1368 }
1369 
1370 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1371 					   struct i915_power_well *power_well)
1372 {
1373 	vlv_display_power_well_deinit(dev_priv);
1374 
1375 	vlv_set_power_well(dev_priv, power_well, false);
1376 }
1377 
1378 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1379 					   struct i915_power_well *power_well)
1380 {
1381 	/* since ref/cri clock was enabled */
1382 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1383 
1384 	vlv_set_power_well(dev_priv, power_well, true);
1385 
1386 	/*
1387 	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1388 	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
1389 	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
1390 	 *   b.	The other bits such as sfr settings / modesel may all
1391 	 *	be set to 0.
1392 	 *
1393 	 * This should only be done on init and resume from S3 with
1394 	 * both PLLs disabled, or we risk losing DPIO and PLL
1395 	 * synchronization.
1396 	 */
1397 	intel_de_write(dev_priv, DPIO_CTL,
1398 		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
1399 }
1400 
1401 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1402 					    struct i915_power_well *power_well)
1403 {
1404 	enum pipe pipe;
1405 
1406 	for_each_pipe(dev_priv, pipe)
1407 		assert_pll_disabled(dev_priv, pipe);
1408 
1409 	/* Assert common reset */
1410 	intel_de_write(dev_priv, DPIO_CTL,
1411 		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
1412 
1413 	vlv_set_power_well(dev_priv, power_well, false);
1414 }
1415 
1416 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1417 
1418 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1419 
1420 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1421 {
1422 	struct i915_power_well *cmn_bc =
1423 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1424 	struct i915_power_well *cmn_d =
1425 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1426 	u32 phy_control = dev_priv->chv_phy_control;
1427 	u32 phy_status = 0;
1428 	u32 phy_status_mask = 0xffffffff;
1429 
1430 	/*
1431 	 * The BIOS can leave the PHY is some weird state
1432 	 * where it doesn't fully power down some parts.
1433 	 * Disable the asserts until the PHY has been fully
1434 	 * reset (ie. the power well has been disabled at
1435 	 * least once).
1436 	 */
1437 	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1438 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1439 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1440 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1441 				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1442 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1443 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1444 
1445 	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1446 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1447 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1448 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1449 
1450 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1451 		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1452 
1453 		/* this assumes override is only used to enable lanes */
1454 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1455 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1456 
1457 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1458 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1459 
1460 		/* CL1 is on whenever anything is on in either channel */
1461 		if (BITS_SET(phy_control,
1462 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1463 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1464 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1465 
1466 		/*
1467 		 * The DPLLB check accounts for the pipe B + port A usage
1468 		 * with CL2 powered up but all the lanes in the second channel
1469 		 * powered down.
1470 		 */
1471 		if (BITS_SET(phy_control,
1472 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1473 		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1474 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1475 
1476 		if (BITS_SET(phy_control,
1477 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1478 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1479 		if (BITS_SET(phy_control,
1480 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1481 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1482 
1483 		if (BITS_SET(phy_control,
1484 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1485 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1486 		if (BITS_SET(phy_control,
1487 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1488 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1489 	}
1490 
1491 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1492 		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1493 
1494 		/* this assumes override is only used to enable lanes */
1495 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1496 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1497 
1498 		if (BITS_SET(phy_control,
1499 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1500 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1501 
1502 		if (BITS_SET(phy_control,
1503 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1504 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1505 		if (BITS_SET(phy_control,
1506 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1507 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1508 	}
1509 
1510 	phy_status &= phy_status_mask;
1511 
1512 	/*
1513 	 * The PHY may be busy with some initial calibration and whatnot,
1514 	 * so the power state can take a while to actually change.
1515 	 */
1516 	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1517 				       phy_status_mask, phy_status, 10))
1518 		drm_err(&dev_priv->drm,
1519 			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1520 			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
1521 			phy_status, dev_priv->chv_phy_control);
1522 }
1523 
1524 #undef BITS_SET
1525 
1526 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1527 					   struct i915_power_well *power_well)
1528 {
1529 	enum dpio_phy phy;
1530 	enum pipe pipe;
1531 	u32 tmp;
1532 
1533 	drm_WARN_ON_ONCE(&dev_priv->drm,
1534 			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1535 			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1536 
1537 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1538 		pipe = PIPE_A;
1539 		phy = DPIO_PHY0;
1540 	} else {
1541 		pipe = PIPE_C;
1542 		phy = DPIO_PHY1;
1543 	}
1544 
1545 	/* since ref/cri clock was enabled */
1546 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1547 	vlv_set_power_well(dev_priv, power_well, true);
1548 
1549 	/* Poll for phypwrgood signal */
1550 	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1551 				  PHY_POWERGOOD(phy), 1))
1552 		drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
1553 			phy);
1554 
1555 	vlv_dpio_get(dev_priv);
1556 
1557 	/* Enable dynamic power down */
1558 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1559 	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1560 		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1561 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1562 
1563 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1564 		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1565 		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1566 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1567 	} else {
1568 		/*
1569 		 * Force the non-existing CL2 off. BXT does this
1570 		 * too, so maybe it saves some power even though
1571 		 * CL2 doesn't exist?
1572 		 */
1573 		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1574 		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1575 		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1576 	}
1577 
1578 	vlv_dpio_put(dev_priv);
1579 
1580 	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1581 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1582 		       dev_priv->chv_phy_control);
1583 
1584 	drm_dbg_kms(&dev_priv->drm,
1585 		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1586 		    phy, dev_priv->chv_phy_control);
1587 
1588 	assert_chv_phy_status(dev_priv);
1589 }
1590 
1591 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1592 					    struct i915_power_well *power_well)
1593 {
1594 	enum dpio_phy phy;
1595 
1596 	drm_WARN_ON_ONCE(&dev_priv->drm,
1597 			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1598 			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1599 
1600 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1601 		phy = DPIO_PHY0;
1602 		assert_pll_disabled(dev_priv, PIPE_A);
1603 		assert_pll_disabled(dev_priv, PIPE_B);
1604 	} else {
1605 		phy = DPIO_PHY1;
1606 		assert_pll_disabled(dev_priv, PIPE_C);
1607 	}
1608 
1609 	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1610 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1611 		       dev_priv->chv_phy_control);
1612 
1613 	vlv_set_power_well(dev_priv, power_well, false);
1614 
1615 	drm_dbg_kms(&dev_priv->drm,
1616 		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1617 		    phy, dev_priv->chv_phy_control);
1618 
1619 	/* PHY is fully reset now, so we can enable the PHY state asserts */
1620 	dev_priv->chv_phy_assert[phy] = true;
1621 
1622 	assert_chv_phy_status(dev_priv);
1623 }
1624 
1625 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1626 				     enum dpio_channel ch, bool override, unsigned int mask)
1627 {
1628 	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1629 	u32 reg, val, expected, actual;
1630 
1631 	/*
1632 	 * The BIOS can leave the PHY is some weird state
1633 	 * where it doesn't fully power down some parts.
1634 	 * Disable the asserts until the PHY has been fully
1635 	 * reset (ie. the power well has been disabled at
1636 	 * least once).
1637 	 */
1638 	if (!dev_priv->chv_phy_assert[phy])
1639 		return;
1640 
1641 	if (ch == DPIO_CH0)
1642 		reg = _CHV_CMN_DW0_CH0;
1643 	else
1644 		reg = _CHV_CMN_DW6_CH1;
1645 
1646 	vlv_dpio_get(dev_priv);
1647 	val = vlv_dpio_read(dev_priv, pipe, reg);
1648 	vlv_dpio_put(dev_priv);
1649 
1650 	/*
1651 	 * This assumes !override is only used when the port is disabled.
1652 	 * All lanes should power down even without the override when
1653 	 * the port is disabled.
1654 	 */
1655 	if (!override || mask == 0xf) {
1656 		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1657 		/*
1658 		 * If CH1 common lane is not active anymore
1659 		 * (eg. for pipe B DPLL) the entire channel will
1660 		 * shut down, which causes the common lane registers
1661 		 * to read as 0. That means we can't actually check
1662 		 * the lane power down status bits, but as the entire
1663 		 * register reads as 0 it's a good indication that the
1664 		 * channel is indeed entirely powered down.
1665 		 */
1666 		if (ch == DPIO_CH1 && val == 0)
1667 			expected = 0;
1668 	} else if (mask != 0x0) {
1669 		expected = DPIO_ANYDL_POWERDOWN;
1670 	} else {
1671 		expected = 0;
1672 	}
1673 
1674 	if (ch == DPIO_CH0)
1675 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1676 	else
1677 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1678 	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1679 
1680 	drm_WARN(&dev_priv->drm, actual != expected,
1681 		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1682 		 !!(actual & DPIO_ALLDL_POWERDOWN),
1683 		 !!(actual & DPIO_ANYDL_POWERDOWN),
1684 		 !!(expected & DPIO_ALLDL_POWERDOWN),
1685 		 !!(expected & DPIO_ANYDL_POWERDOWN),
1686 		 reg, val);
1687 }
1688 
1689 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1690 			  enum dpio_channel ch, bool override)
1691 {
1692 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1693 	bool was_override;
1694 
1695 	mutex_lock(&power_domains->lock);
1696 
1697 	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1698 
1699 	if (override == was_override)
1700 		goto out;
1701 
1702 	if (override)
1703 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1704 	else
1705 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1706 
1707 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1708 		       dev_priv->chv_phy_control);
1709 
1710 	drm_dbg_kms(&dev_priv->drm,
1711 		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1712 		    phy, ch, dev_priv->chv_phy_control);
1713 
1714 	assert_chv_phy_status(dev_priv);
1715 
1716 out:
1717 	mutex_unlock(&power_domains->lock);
1718 
1719 	return was_override;
1720 }
1721 
1722 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1723 			     bool override, unsigned int mask)
1724 {
1725 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1726 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1727 	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
1728 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
1729 
1730 	mutex_lock(&power_domains->lock);
1731 
1732 	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1733 	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1734 
1735 	if (override)
1736 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1737 	else
1738 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1739 
1740 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1741 		       dev_priv->chv_phy_control);
1742 
1743 	drm_dbg_kms(&dev_priv->drm,
1744 		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1745 		    phy, ch, mask, dev_priv->chv_phy_control);
1746 
1747 	assert_chv_phy_status(dev_priv);
1748 
1749 	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1750 
1751 	mutex_unlock(&power_domains->lock);
1752 }
1753 
1754 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1755 					struct i915_power_well *power_well)
1756 {
1757 	enum pipe pipe = PIPE_A;
1758 	bool enabled;
1759 	u32 state, ctrl;
1760 
1761 	vlv_punit_get(dev_priv);
1762 
1763 	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1764 	/*
1765 	 * We only ever set the power-on and power-gate states, anything
1766 	 * else is unexpected.
1767 	 */
1768 	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1769 		    state != DP_SSS_PWR_GATE(pipe));
1770 	enabled = state == DP_SSS_PWR_ON(pipe);
1771 
1772 	/*
1773 	 * A transient state at this point would mean some unexpected party
1774 	 * is poking at the power controls too.
1775 	 */
1776 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1777 	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1778 
1779 	vlv_punit_put(dev_priv);
1780 
1781 	return enabled;
1782 }
1783 
1784 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1785 				    struct i915_power_well *power_well,
1786 				    bool enable)
1787 {
1788 	enum pipe pipe = PIPE_A;
1789 	u32 state;
1790 	u32 ctrl;
1791 
1792 	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1793 
1794 	vlv_punit_get(dev_priv);
1795 
1796 #define COND \
1797 	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1798 
1799 	if (COND)
1800 		goto out;
1801 
1802 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1803 	ctrl &= ~DP_SSC_MASK(pipe);
1804 	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1805 	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1806 
1807 	if (wait_for(COND, 100))
1808 		drm_err(&dev_priv->drm,
1809 			"timeout setting power well state %08x (%08x)\n",
1810 			state,
1811 			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1812 
1813 #undef COND
1814 
1815 out:
1816 	vlv_punit_put(dev_priv);
1817 }
1818 
1819 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1820 					struct i915_power_well *power_well)
1821 {
1822 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1823 		       dev_priv->chv_phy_control);
1824 }
1825 
1826 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1827 				       struct i915_power_well *power_well)
1828 {
1829 	chv_set_pipe_power_well(dev_priv, power_well, true);
1830 
1831 	vlv_display_power_well_init(dev_priv);
1832 }
1833 
1834 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1835 					struct i915_power_well *power_well)
1836 {
1837 	vlv_display_power_well_deinit(dev_priv);
1838 
1839 	chv_set_pipe_power_well(dev_priv, power_well, false);
1840 }
1841 
1842 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1843 {
1844 	return power_domains->async_put_domains[0] |
1845 	       power_domains->async_put_domains[1];
1846 }
1847 
1848 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1849 
1850 static bool
1851 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1852 {
1853 	return !WARN_ON(power_domains->async_put_domains[0] &
1854 			power_domains->async_put_domains[1]);
1855 }
1856 
1857 static bool
1858 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1859 {
1860 	enum intel_display_power_domain domain;
1861 	bool err = false;
1862 
1863 	err |= !assert_async_put_domain_masks_disjoint(power_domains);
1864 	err |= WARN_ON(!!power_domains->async_put_wakeref !=
1865 		       !!__async_put_domains_mask(power_domains));
1866 
1867 	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1868 		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
1869 
1870 	return !err;
1871 }
1872 
1873 static void print_power_domains(struct i915_power_domains *power_domains,
1874 				const char *prefix, u64 mask)
1875 {
1876 	enum intel_display_power_domain domain;
1877 
1878 	DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
1879 	for_each_power_domain(domain, mask)
1880 		DRM_DEBUG_DRIVER("%s use_count %d\n",
1881 				 intel_display_power_domain_str(domain),
1882 				 power_domains->domain_use_count[domain]);
1883 }
1884 
1885 static void
1886 print_async_put_domains_state(struct i915_power_domains *power_domains)
1887 {
1888 	DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
1889 			 power_domains->async_put_wakeref);
1890 
1891 	print_power_domains(power_domains, "async_put_domains[0]",
1892 			    power_domains->async_put_domains[0]);
1893 	print_power_domains(power_domains, "async_put_domains[1]",
1894 			    power_domains->async_put_domains[1]);
1895 }
1896 
1897 static void
1898 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1899 {
1900 	if (!__async_put_domains_state_ok(power_domains))
1901 		print_async_put_domains_state(power_domains);
1902 }
1903 
1904 #else
1905 
1906 static void
1907 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1908 {
1909 }
1910 
1911 static void
1912 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1913 {
1914 }
1915 
1916 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
1917 
1918 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
1919 {
1920 	assert_async_put_domain_masks_disjoint(power_domains);
1921 
1922 	return __async_put_domains_mask(power_domains);
1923 }
1924 
1925 static void
1926 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
1927 			       enum intel_display_power_domain domain)
1928 {
1929 	assert_async_put_domain_masks_disjoint(power_domains);
1930 
1931 	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
1932 	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
1933 }
1934 
1935 static bool
1936 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
1937 				       enum intel_display_power_domain domain)
1938 {
1939 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1940 	bool ret = false;
1941 
1942 	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
1943 		goto out_verify;
1944 
1945 	async_put_domains_clear_domain(power_domains, domain);
1946 
1947 	ret = true;
1948 
1949 	if (async_put_domains_mask(power_domains))
1950 		goto out_verify;
1951 
1952 	cancel_delayed_work(&power_domains->async_put_work);
1953 	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
1954 				 fetch_and_zero(&power_domains->async_put_wakeref));
1955 out_verify:
1956 	verify_async_put_domains_state(power_domains);
1957 
1958 	return ret;
1959 }
1960 
1961 static void
1962 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1963 				 enum intel_display_power_domain domain)
1964 {
1965 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1966 	struct i915_power_well *power_well;
1967 
1968 	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
1969 		return;
1970 
1971 	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1972 		intel_power_well_get(dev_priv, power_well);
1973 
1974 	power_domains->domain_use_count[domain]++;
1975 }
1976 
1977 /**
1978  * intel_display_power_get - grab a power domain reference
1979  * @dev_priv: i915 device instance
1980  * @domain: power domain to reference
1981  *
1982  * This function grabs a power domain reference for @domain and ensures that the
1983  * power domain and all its parents are powered up. Therefore users should only
1984  * grab a reference to the innermost power domain they need.
1985  *
1986  * Any power domain reference obtained by this function must have a symmetric
1987  * call to intel_display_power_put() to release the reference again.
1988  */
1989 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1990 					enum intel_display_power_domain domain)
1991 {
1992 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1993 	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1994 
1995 	mutex_lock(&power_domains->lock);
1996 	__intel_display_power_get_domain(dev_priv, domain);
1997 	mutex_unlock(&power_domains->lock);
1998 
1999 	return wakeref;
2000 }
2001 
2002 /**
2003  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2004  * @dev_priv: i915 device instance
2005  * @domain: power domain to reference
2006  *
2007  * This function grabs a power domain reference for @domain and ensures that the
2008  * power domain and all its parents are powered up. Therefore users should only
2009  * grab a reference to the innermost power domain they need.
2010  *
2011  * Any power domain reference obtained by this function must have a symmetric
2012  * call to intel_display_power_put() to release the reference again.
2013  */
2014 intel_wakeref_t
2015 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2016 				   enum intel_display_power_domain domain)
2017 {
2018 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2019 	intel_wakeref_t wakeref;
2020 	bool is_enabled;
2021 
2022 	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2023 	if (!wakeref)
2024 		return false;
2025 
2026 	mutex_lock(&power_domains->lock);
2027 
2028 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
2029 		__intel_display_power_get_domain(dev_priv, domain);
2030 		is_enabled = true;
2031 	} else {
2032 		is_enabled = false;
2033 	}
2034 
2035 	mutex_unlock(&power_domains->lock);
2036 
2037 	if (!is_enabled) {
2038 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2039 		wakeref = 0;
2040 	}
2041 
2042 	return wakeref;
2043 }
2044 
2045 static void
2046 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2047 				 enum intel_display_power_domain domain)
2048 {
2049 	struct i915_power_domains *power_domains;
2050 	struct i915_power_well *power_well;
2051 	const char *name = intel_display_power_domain_str(domain);
2052 
2053 	power_domains = &dev_priv->power_domains;
2054 
2055 	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2056 		 "Use count on domain %s is already zero\n",
2057 		 name);
2058 	drm_WARN(&dev_priv->drm,
2059 		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2060 		 "Async disabling of domain %s is pending\n",
2061 		 name);
2062 
2063 	power_domains->domain_use_count[domain]--;
2064 
2065 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2066 		intel_power_well_put(dev_priv, power_well);
2067 }
2068 
2069 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2070 				      enum intel_display_power_domain domain)
2071 {
2072 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2073 
2074 	mutex_lock(&power_domains->lock);
2075 	__intel_display_power_put_domain(dev_priv, domain);
2076 	mutex_unlock(&power_domains->lock);
2077 }
2078 
2079 /**
2080  * intel_display_power_put_unchecked - release an unchecked power domain reference
2081  * @dev_priv: i915 device instance
2082  * @domain: power domain to reference
2083  *
2084  * This function drops the power domain reference obtained by
2085  * intel_display_power_get() and might power down the corresponding hardware
2086  * block right away if this is the last reference.
2087  *
2088  * This function exists only for historical reasons and should be avoided in
2089  * new code, as the correctness of its use cannot be checked. Always use
2090  * intel_display_power_put() instead.
2091  */
2092 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2093 				       enum intel_display_power_domain domain)
2094 {
2095 	__intel_display_power_put(dev_priv, domain);
2096 	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2097 }
2098 
2099 static void
2100 queue_async_put_domains_work(struct i915_power_domains *power_domains,
2101 			     intel_wakeref_t wakeref)
2102 {
2103 	WARN_ON(power_domains->async_put_wakeref);
2104 	power_domains->async_put_wakeref = wakeref;
2105 	WARN_ON(!queue_delayed_work(system_unbound_wq,
2106 				    &power_domains->async_put_work,
2107 				    msecs_to_jiffies(100)));
2108 }
2109 
2110 static void
2111 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2112 {
2113 	struct drm_i915_private *dev_priv =
2114 		container_of(power_domains, struct drm_i915_private,
2115 			     power_domains);
2116 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2117 	enum intel_display_power_domain domain;
2118 	intel_wakeref_t wakeref;
2119 
2120 	/*
2121 	 * The caller must hold already raw wakeref, upgrade that to a proper
2122 	 * wakeref to make the state checker happy about the HW access during
2123 	 * power well disabling.
2124 	 */
2125 	assert_rpm_raw_wakeref_held(rpm);
2126 	wakeref = intel_runtime_pm_get(rpm);
2127 
2128 	for_each_power_domain(domain, mask) {
2129 		/* Clear before put, so put's sanity check is happy. */
2130 		async_put_domains_clear_domain(power_domains, domain);
2131 		__intel_display_power_put_domain(dev_priv, domain);
2132 	}
2133 
2134 	intel_runtime_pm_put(rpm, wakeref);
2135 }
2136 
2137 static void
2138 intel_display_power_put_async_work(struct work_struct *work)
2139 {
2140 	struct drm_i915_private *dev_priv =
2141 		container_of(work, struct drm_i915_private,
2142 			     power_domains.async_put_work.work);
2143 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2144 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2145 	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2146 	intel_wakeref_t old_work_wakeref = 0;
2147 
2148 	mutex_lock(&power_domains->lock);
2149 
2150 	/*
2151 	 * Bail out if all the domain refs pending to be released were grabbed
2152 	 * by subsequent gets or a flush_work.
2153 	 */
2154 	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2155 	if (!old_work_wakeref)
2156 		goto out_verify;
2157 
2158 	release_async_put_domains(power_domains,
2159 				  power_domains->async_put_domains[0]);
2160 
2161 	/* Requeue the work if more domains were async put meanwhile. */
2162 	if (power_domains->async_put_domains[1]) {
2163 		power_domains->async_put_domains[0] =
2164 			fetch_and_zero(&power_domains->async_put_domains[1]);
2165 		queue_async_put_domains_work(power_domains,
2166 					     fetch_and_zero(&new_work_wakeref));
2167 	}
2168 
2169 out_verify:
2170 	verify_async_put_domains_state(power_domains);
2171 
2172 	mutex_unlock(&power_domains->lock);
2173 
2174 	if (old_work_wakeref)
2175 		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2176 	if (new_work_wakeref)
2177 		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2178 }
2179 
2180 /**
2181  * intel_display_power_put_async - release a power domain reference asynchronously
2182  * @i915: i915 device instance
2183  * @domain: power domain to reference
2184  * @wakeref: wakeref acquired for the reference that is being released
2185  *
2186  * This function drops the power domain reference obtained by
2187  * intel_display_power_get*() and schedules a work to power down the
2188  * corresponding hardware block if this is the last reference.
2189  */
2190 void __intel_display_power_put_async(struct drm_i915_private *i915,
2191 				     enum intel_display_power_domain domain,
2192 				     intel_wakeref_t wakeref)
2193 {
2194 	struct i915_power_domains *power_domains = &i915->power_domains;
2195 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
2196 	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2197 
2198 	mutex_lock(&power_domains->lock);
2199 
2200 	if (power_domains->domain_use_count[domain] > 1) {
2201 		__intel_display_power_put_domain(i915, domain);
2202 
2203 		goto out_verify;
2204 	}
2205 
2206 	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2207 
2208 	/* Let a pending work requeue itself or queue a new one. */
2209 	if (power_domains->async_put_wakeref) {
2210 		power_domains->async_put_domains[1] |= BIT_ULL(domain);
2211 	} else {
2212 		power_domains->async_put_domains[0] |= BIT_ULL(domain);
2213 		queue_async_put_domains_work(power_domains,
2214 					     fetch_and_zero(&work_wakeref));
2215 	}
2216 
2217 out_verify:
2218 	verify_async_put_domains_state(power_domains);
2219 
2220 	mutex_unlock(&power_domains->lock);
2221 
2222 	if (work_wakeref)
2223 		intel_runtime_pm_put_raw(rpm, work_wakeref);
2224 
2225 	intel_runtime_pm_put(rpm, wakeref);
2226 }
2227 
2228 /**
2229  * intel_display_power_flush_work - flushes the async display power disabling work
2230  * @i915: i915 device instance
2231  *
2232  * Flushes any pending work that was scheduled by a preceding
2233  * intel_display_power_put_async() call, completing the disabling of the
2234  * corresponding power domains.
2235  *
2236  * Note that the work handler function may still be running after this
2237  * function returns; to ensure that the work handler isn't running use
2238  * intel_display_power_flush_work_sync() instead.
2239  */
2240 void intel_display_power_flush_work(struct drm_i915_private *i915)
2241 {
2242 	struct i915_power_domains *power_domains = &i915->power_domains;
2243 	intel_wakeref_t work_wakeref;
2244 
2245 	mutex_lock(&power_domains->lock);
2246 
2247 	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2248 	if (!work_wakeref)
2249 		goto out_verify;
2250 
2251 	release_async_put_domains(power_domains,
2252 				  async_put_domains_mask(power_domains));
2253 	cancel_delayed_work(&power_domains->async_put_work);
2254 
2255 out_verify:
2256 	verify_async_put_domains_state(power_domains);
2257 
2258 	mutex_unlock(&power_domains->lock);
2259 
2260 	if (work_wakeref)
2261 		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2262 }
2263 
2264 /**
2265  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2266  * @i915: i915 device instance
2267  *
2268  * Like intel_display_power_flush_work(), but also ensure that the work
2269  * handler function is not running any more when this function returns.
2270  */
2271 static void
2272 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2273 {
2274 	struct i915_power_domains *power_domains = &i915->power_domains;
2275 
2276 	intel_display_power_flush_work(i915);
2277 	cancel_delayed_work_sync(&power_domains->async_put_work);
2278 
2279 	verify_async_put_domains_state(power_domains);
2280 
2281 	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2282 }
2283 
2284 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2285 /**
2286  * intel_display_power_put - release a power domain reference
2287  * @dev_priv: i915 device instance
2288  * @domain: power domain to reference
2289  * @wakeref: wakeref acquired for the reference that is being released
2290  *
2291  * This function drops the power domain reference obtained by
2292  * intel_display_power_get() and might power down the corresponding hardware
2293  * block right away if this is the last reference.
2294  */
2295 void intel_display_power_put(struct drm_i915_private *dev_priv,
2296 			     enum intel_display_power_domain domain,
2297 			     intel_wakeref_t wakeref)
2298 {
2299 	__intel_display_power_put(dev_priv, domain);
2300 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2301 }
2302 #endif
2303 
2304 #define I830_PIPES_POWER_DOMAINS (		\
2305 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2306 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2307 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2308 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2309 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2310 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2311 	BIT_ULL(POWER_DOMAIN_INIT))
2312 
2313 #define VLV_DISPLAY_POWER_DOMAINS (		\
2314 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2315 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2316 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2317 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2318 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2319 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2320 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2321 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2322 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2323 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2324 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2325 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2326 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2327 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2328 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2329 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2330 	BIT_ULL(POWER_DOMAIN_INIT))
2331 
2332 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
2333 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2334 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2335 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2336 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2337 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2338 	BIT_ULL(POWER_DOMAIN_INIT))
2339 
2340 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
2341 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2342 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2343 	BIT_ULL(POWER_DOMAIN_INIT))
2344 
2345 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
2346 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2347 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2348 	BIT_ULL(POWER_DOMAIN_INIT))
2349 
2350 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
2351 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2352 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2353 	BIT_ULL(POWER_DOMAIN_INIT))
2354 
2355 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
2356 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2357 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2358 	BIT_ULL(POWER_DOMAIN_INIT))
2359 
2360 #define CHV_DISPLAY_POWER_DOMAINS (		\
2361 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2362 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2363 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2364 	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
2365 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2366 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2367 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2368 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2369 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2370 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
2371 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2372 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2373 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2374 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2375 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2376 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2377 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2378 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2379 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2380 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2381 	BIT_ULL(POWER_DOMAIN_INIT))
2382 
2383 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
2384 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2385 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2386 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2387 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2388 	BIT_ULL(POWER_DOMAIN_INIT))
2389 
2390 #define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
2391 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2392 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2393 	BIT_ULL(POWER_DOMAIN_INIT))
2394 
2395 #define HSW_DISPLAY_POWER_DOMAINS (			\
2396 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2397 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2398 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
2399 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2400 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2401 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2402 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2403 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2404 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2405 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2406 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2407 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2408 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2409 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2410 	BIT_ULL(POWER_DOMAIN_INIT))
2411 
2412 #define BDW_DISPLAY_POWER_DOMAINS (			\
2413 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2414 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2415 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2416 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2417 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2418 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2419 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2420 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2421 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2422 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2423 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2424 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2425 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2426 	BIT_ULL(POWER_DOMAIN_INIT))
2427 
2428 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2429 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2430 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2431 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2432 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2433 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2434 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2435 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2436 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2437 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2438 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2439 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
2440 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2441 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2442 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2443 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2444 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2445 	BIT_ULL(POWER_DOMAIN_INIT))
2446 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
2447 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2448 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2449 	BIT_ULL(POWER_DOMAIN_INIT))
2450 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2451 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2452 	BIT_ULL(POWER_DOMAIN_INIT))
2453 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2454 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2455 	BIT_ULL(POWER_DOMAIN_INIT))
2456 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
2457 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2458 	BIT_ULL(POWER_DOMAIN_INIT))
2459 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2460 	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2461 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2462 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2463 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2464 	BIT_ULL(POWER_DOMAIN_INIT))
2465 
2466 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2467 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2468 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2469 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2470 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2471 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2472 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2473 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2474 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2475 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2476 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2477 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2478 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2479 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2480 	BIT_ULL(POWER_DOMAIN_INIT))
2481 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2482 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2483 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2484 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2485 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2486 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2487 	BIT_ULL(POWER_DOMAIN_INIT))
2488 #define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
2489 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2490 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2491 	BIT_ULL(POWER_DOMAIN_INIT))
2492 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
2493 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2494 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2495 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2496 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2497 	BIT_ULL(POWER_DOMAIN_INIT))
2498 
2499 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2500 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2501 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2502 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2503 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2504 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2505 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2506 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2507 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2508 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2509 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2510 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2511 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2512 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2513 	BIT_ULL(POWER_DOMAIN_INIT))
2514 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
2515 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2516 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2517 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2518 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2519 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2520 #define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
2521 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2522 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2523 	BIT_ULL(POWER_DOMAIN_INIT))
2524 #define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
2525 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2526 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2527 	BIT_ULL(POWER_DOMAIN_INIT))
2528 #define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
2529 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2530 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2531 	BIT_ULL(POWER_DOMAIN_INIT))
2532 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
2533 	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
2534 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2535 	BIT_ULL(POWER_DOMAIN_INIT))
2536 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
2537 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2538 	BIT_ULL(POWER_DOMAIN_INIT))
2539 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
2540 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2541 	BIT_ULL(POWER_DOMAIN_INIT))
2542 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2543 	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2544 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2545 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2546 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2547 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2548 	BIT_ULL(POWER_DOMAIN_INIT))
2549 
2550 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2551 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2552 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2553 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2554 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2555 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2556 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2557 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2558 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2559 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2560 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2561 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
2562 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2563 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2564 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2565 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2566 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2567 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2568 	BIT_ULL(POWER_DOMAIN_INIT))
2569 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
2570 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2571 	BIT_ULL(POWER_DOMAIN_INIT))
2572 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
2573 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2574 	BIT_ULL(POWER_DOMAIN_INIT))
2575 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
2576 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2577 	BIT_ULL(POWER_DOMAIN_INIT))
2578 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
2579 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2580 	BIT_ULL(POWER_DOMAIN_INIT))
2581 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
2582 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2583 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2584 	BIT_ULL(POWER_DOMAIN_INIT))
2585 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
2586 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2587 	BIT_ULL(POWER_DOMAIN_INIT))
2588 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
2589 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2590 	BIT_ULL(POWER_DOMAIN_INIT))
2591 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
2592 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2593 	BIT_ULL(POWER_DOMAIN_INIT))
2594 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
2595 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2596 	BIT_ULL(POWER_DOMAIN_INIT))
2597 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
2598 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2599 	BIT_ULL(POWER_DOMAIN_INIT))
2600 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2601 	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2602 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2603 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2604 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2605 	BIT_ULL(POWER_DOMAIN_INIT))
2606 
2607 /*
2608  * ICL PW_0/PG_0 domains (HW/DMC control):
2609  * - PCI
2610  * - clocks except port PLL
2611  * - central power except FBC
2612  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2613  * ICL PW_1/PG_1 domains (HW/DMC control):
2614  * - DBUF function
2615  * - PIPE_A and its planes, except VGA
2616  * - transcoder EDP + PSR
2617  * - transcoder DSI
2618  * - DDI_A
2619  * - FBC
2620  */
2621 #define ICL_PW_4_POWER_DOMAINS (			\
2622 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2623 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2624 	BIT_ULL(POWER_DOMAIN_INIT))
2625 	/* VDSC/joining */
2626 #define ICL_PW_3_POWER_DOMAINS (			\
2627 	ICL_PW_4_POWER_DOMAINS |			\
2628 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2629 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2630 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2631 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2632 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2633 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2634 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2635 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2636 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2637 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2638 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2639 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2640 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2641 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2642 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2643 	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |		\
2644 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2645 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2646 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2647 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2648 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2649 	BIT_ULL(POWER_DOMAIN_INIT))
2650 	/*
2651 	 * - transcoder WD
2652 	 * - KVMR (HW control)
2653 	 */
2654 #define ICL_PW_2_POWER_DOMAINS (			\
2655 	ICL_PW_3_POWER_DOMAINS |			\
2656 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |		\
2657 	BIT_ULL(POWER_DOMAIN_INIT))
2658 	/*
2659 	 * - KVMR (HW control)
2660 	 */
2661 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2662 	ICL_PW_2_POWER_DOMAINS |			\
2663 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2664 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2665 	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |			\
2666 	BIT_ULL(POWER_DOMAIN_INIT))
2667 
2668 #define ICL_DDI_IO_A_POWER_DOMAINS (			\
2669 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2670 #define ICL_DDI_IO_B_POWER_DOMAINS (			\
2671 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2672 #define ICL_DDI_IO_C_POWER_DOMAINS (			\
2673 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2674 #define ICL_DDI_IO_D_POWER_DOMAINS (			\
2675 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2676 #define ICL_DDI_IO_E_POWER_DOMAINS (			\
2677 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2678 #define ICL_DDI_IO_F_POWER_DOMAINS (			\
2679 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2680 
2681 #define ICL_AUX_A_IO_POWER_DOMAINS (			\
2682 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2683 	BIT_ULL(POWER_DOMAIN_AUX_A))
2684 #define ICL_AUX_B_IO_POWER_DOMAINS (			\
2685 	BIT_ULL(POWER_DOMAIN_AUX_B))
2686 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS (		\
2687 	BIT_ULL(POWER_DOMAIN_AUX_C))
2688 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS (		\
2689 	BIT_ULL(POWER_DOMAIN_AUX_D))
2690 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS (		\
2691 	BIT_ULL(POWER_DOMAIN_AUX_E))
2692 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS (		\
2693 	BIT_ULL(POWER_DOMAIN_AUX_F))
2694 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (		\
2695 	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2696 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (		\
2697 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2698 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (		\
2699 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2700 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (		\
2701 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2702 
2703 #define TGL_PW_5_POWER_DOMAINS (			\
2704 	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
2705 	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
2706 	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
2707 	BIT_ULL(POWER_DOMAIN_INIT))
2708 
2709 #define TGL_PW_4_POWER_DOMAINS (			\
2710 	TGL_PW_5_POWER_DOMAINS |			\
2711 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2712 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2713 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2714 	BIT_ULL(POWER_DOMAIN_INIT))
2715 
2716 #define TGL_PW_3_POWER_DOMAINS (			\
2717 	TGL_PW_4_POWER_DOMAINS |			\
2718 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2719 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2720 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2721 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2722 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2723 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2724 	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |	\
2725 	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |	\
2726 	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |	\
2727 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2728 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2729 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2730 	BIT_ULL(POWER_DOMAIN_AUX_G) |			\
2731 	BIT_ULL(POWER_DOMAIN_AUX_H) |			\
2732 	BIT_ULL(POWER_DOMAIN_AUX_I) |			\
2733 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2734 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2735 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2736 	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |		\
2737 	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |		\
2738 	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |		\
2739 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2740 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2741 	BIT_ULL(POWER_DOMAIN_INIT))
2742 
2743 #define TGL_PW_2_POWER_DOMAINS (			\
2744 	TGL_PW_3_POWER_DOMAINS |			\
2745 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
2746 	BIT_ULL(POWER_DOMAIN_INIT))
2747 
2748 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2749 	TGL_PW_3_POWER_DOMAINS |			\
2750 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2751 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2752 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2753 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2754 	BIT_ULL(POWER_DOMAIN_INIT))
2755 
2756 #define TGL_DDI_IO_D_TC1_POWER_DOMAINS (	\
2757 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2758 #define TGL_DDI_IO_E_TC2_POWER_DOMAINS (	\
2759 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2760 #define TGL_DDI_IO_F_TC3_POWER_DOMAINS (	\
2761 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2762 #define TGL_DDI_IO_G_TC4_POWER_DOMAINS (	\
2763 	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
2764 #define TGL_DDI_IO_H_TC5_POWER_DOMAINS (	\
2765 	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
2766 #define TGL_DDI_IO_I_TC6_POWER_DOMAINS (	\
2767 	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
2768 
2769 #define TGL_AUX_A_IO_POWER_DOMAINS (		\
2770 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
2771 	BIT_ULL(POWER_DOMAIN_AUX_A))
2772 #define TGL_AUX_B_IO_POWER_DOMAINS (		\
2773 	BIT_ULL(POWER_DOMAIN_AUX_B))
2774 #define TGL_AUX_C_IO_POWER_DOMAINS (		\
2775 	BIT_ULL(POWER_DOMAIN_AUX_C))
2776 #define TGL_AUX_D_TC1_IO_POWER_DOMAINS (	\
2777 	BIT_ULL(POWER_DOMAIN_AUX_D))
2778 #define TGL_AUX_E_TC2_IO_POWER_DOMAINS (	\
2779 	BIT_ULL(POWER_DOMAIN_AUX_E))
2780 #define TGL_AUX_F_TC3_IO_POWER_DOMAINS (	\
2781 	BIT_ULL(POWER_DOMAIN_AUX_F))
2782 #define TGL_AUX_G_TC4_IO_POWER_DOMAINS (	\
2783 	BIT_ULL(POWER_DOMAIN_AUX_G))
2784 #define TGL_AUX_H_TC5_IO_POWER_DOMAINS (	\
2785 	BIT_ULL(POWER_DOMAIN_AUX_H))
2786 #define TGL_AUX_I_TC6_IO_POWER_DOMAINS (	\
2787 	BIT_ULL(POWER_DOMAIN_AUX_I))
2788 #define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (	\
2789 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2790 #define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (	\
2791 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2792 #define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (	\
2793 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2794 #define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (	\
2795 	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
2796 #define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (	\
2797 	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
2798 #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
2799 	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
2800 
2801 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2802 	.sync_hw = i9xx_power_well_sync_hw_noop,
2803 	.enable = i9xx_always_on_power_well_noop,
2804 	.disable = i9xx_always_on_power_well_noop,
2805 	.is_enabled = i9xx_always_on_power_well_enabled,
2806 };
2807 
2808 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2809 	.sync_hw = chv_pipe_power_well_sync_hw,
2810 	.enable = chv_pipe_power_well_enable,
2811 	.disable = chv_pipe_power_well_disable,
2812 	.is_enabled = chv_pipe_power_well_enabled,
2813 };
2814 
2815 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2816 	.sync_hw = i9xx_power_well_sync_hw_noop,
2817 	.enable = chv_dpio_cmn_power_well_enable,
2818 	.disable = chv_dpio_cmn_power_well_disable,
2819 	.is_enabled = vlv_power_well_enabled,
2820 };
2821 
2822 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2823 	{
2824 		.name = "always-on",
2825 		.always_on = true,
2826 		.domains = POWER_DOMAIN_MASK,
2827 		.ops = &i9xx_always_on_power_well_ops,
2828 		.id = DISP_PW_ID_NONE,
2829 	},
2830 };
2831 
2832 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2833 	.sync_hw = i830_pipes_power_well_sync_hw,
2834 	.enable = i830_pipes_power_well_enable,
2835 	.disable = i830_pipes_power_well_disable,
2836 	.is_enabled = i830_pipes_power_well_enabled,
2837 };
2838 
2839 static const struct i915_power_well_desc i830_power_wells[] = {
2840 	{
2841 		.name = "always-on",
2842 		.always_on = true,
2843 		.domains = POWER_DOMAIN_MASK,
2844 		.ops = &i9xx_always_on_power_well_ops,
2845 		.id = DISP_PW_ID_NONE,
2846 	},
2847 	{
2848 		.name = "pipes",
2849 		.domains = I830_PIPES_POWER_DOMAINS,
2850 		.ops = &i830_pipes_power_well_ops,
2851 		.id = DISP_PW_ID_NONE,
2852 	},
2853 };
2854 
2855 static const struct i915_power_well_ops hsw_power_well_ops = {
2856 	.sync_hw = hsw_power_well_sync_hw,
2857 	.enable = hsw_power_well_enable,
2858 	.disable = hsw_power_well_disable,
2859 	.is_enabled = hsw_power_well_enabled,
2860 };
2861 
2862 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2863 	.sync_hw = i9xx_power_well_sync_hw_noop,
2864 	.enable = gen9_dc_off_power_well_enable,
2865 	.disable = gen9_dc_off_power_well_disable,
2866 	.is_enabled = gen9_dc_off_power_well_enabled,
2867 };
2868 
2869 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2870 	.sync_hw = i9xx_power_well_sync_hw_noop,
2871 	.enable = bxt_dpio_cmn_power_well_enable,
2872 	.disable = bxt_dpio_cmn_power_well_disable,
2873 	.is_enabled = bxt_dpio_cmn_power_well_enabled,
2874 };
2875 
2876 static const struct i915_power_well_regs hsw_power_well_regs = {
2877 	.bios	= HSW_PWR_WELL_CTL1,
2878 	.driver	= HSW_PWR_WELL_CTL2,
2879 	.kvmr	= HSW_PWR_WELL_CTL3,
2880 	.debug	= HSW_PWR_WELL_CTL4,
2881 };
2882 
2883 static const struct i915_power_well_desc hsw_power_wells[] = {
2884 	{
2885 		.name = "always-on",
2886 		.always_on = true,
2887 		.domains = POWER_DOMAIN_MASK,
2888 		.ops = &i9xx_always_on_power_well_ops,
2889 		.id = DISP_PW_ID_NONE,
2890 	},
2891 	{
2892 		.name = "display",
2893 		.domains = HSW_DISPLAY_POWER_DOMAINS,
2894 		.ops = &hsw_power_well_ops,
2895 		.id = HSW_DISP_PW_GLOBAL,
2896 		{
2897 			.hsw.regs = &hsw_power_well_regs,
2898 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2899 			.hsw.has_vga = true,
2900 		},
2901 	},
2902 };
2903 
2904 static const struct i915_power_well_desc bdw_power_wells[] = {
2905 	{
2906 		.name = "always-on",
2907 		.always_on = true,
2908 		.domains = POWER_DOMAIN_MASK,
2909 		.ops = &i9xx_always_on_power_well_ops,
2910 		.id = DISP_PW_ID_NONE,
2911 	},
2912 	{
2913 		.name = "display",
2914 		.domains = BDW_DISPLAY_POWER_DOMAINS,
2915 		.ops = &hsw_power_well_ops,
2916 		.id = HSW_DISP_PW_GLOBAL,
2917 		{
2918 			.hsw.regs = &hsw_power_well_regs,
2919 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2920 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2921 			.hsw.has_vga = true,
2922 		},
2923 	},
2924 };
2925 
2926 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2927 	.sync_hw = i9xx_power_well_sync_hw_noop,
2928 	.enable = vlv_display_power_well_enable,
2929 	.disable = vlv_display_power_well_disable,
2930 	.is_enabled = vlv_power_well_enabled,
2931 };
2932 
2933 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2934 	.sync_hw = i9xx_power_well_sync_hw_noop,
2935 	.enable = vlv_dpio_cmn_power_well_enable,
2936 	.disable = vlv_dpio_cmn_power_well_disable,
2937 	.is_enabled = vlv_power_well_enabled,
2938 };
2939 
2940 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2941 	.sync_hw = i9xx_power_well_sync_hw_noop,
2942 	.enable = vlv_power_well_enable,
2943 	.disable = vlv_power_well_disable,
2944 	.is_enabled = vlv_power_well_enabled,
2945 };
2946 
2947 static const struct i915_power_well_desc vlv_power_wells[] = {
2948 	{
2949 		.name = "always-on",
2950 		.always_on = true,
2951 		.domains = POWER_DOMAIN_MASK,
2952 		.ops = &i9xx_always_on_power_well_ops,
2953 		.id = DISP_PW_ID_NONE,
2954 	},
2955 	{
2956 		.name = "display",
2957 		.domains = VLV_DISPLAY_POWER_DOMAINS,
2958 		.ops = &vlv_display_power_well_ops,
2959 		.id = VLV_DISP_PW_DISP2D,
2960 		{
2961 			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2962 		},
2963 	},
2964 	{
2965 		.name = "dpio-tx-b-01",
2966 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2967 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2968 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2969 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2970 		.ops = &vlv_dpio_power_well_ops,
2971 		.id = DISP_PW_ID_NONE,
2972 		{
2973 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2974 		},
2975 	},
2976 	{
2977 		.name = "dpio-tx-b-23",
2978 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2979 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2980 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2981 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2982 		.ops = &vlv_dpio_power_well_ops,
2983 		.id = DISP_PW_ID_NONE,
2984 		{
2985 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2986 		},
2987 	},
2988 	{
2989 		.name = "dpio-tx-c-01",
2990 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2991 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2992 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2993 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2994 		.ops = &vlv_dpio_power_well_ops,
2995 		.id = DISP_PW_ID_NONE,
2996 		{
2997 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
2998 		},
2999 	},
3000 	{
3001 		.name = "dpio-tx-c-23",
3002 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3003 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3004 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3005 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3006 		.ops = &vlv_dpio_power_well_ops,
3007 		.id = DISP_PW_ID_NONE,
3008 		{
3009 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3010 		},
3011 	},
3012 	{
3013 		.name = "dpio-common",
3014 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3015 		.ops = &vlv_dpio_cmn_power_well_ops,
3016 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3017 		{
3018 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3019 		},
3020 	},
3021 };
3022 
3023 static const struct i915_power_well_desc chv_power_wells[] = {
3024 	{
3025 		.name = "always-on",
3026 		.always_on = true,
3027 		.domains = POWER_DOMAIN_MASK,
3028 		.ops = &i9xx_always_on_power_well_ops,
3029 		.id = DISP_PW_ID_NONE,
3030 	},
3031 	{
3032 		.name = "display",
3033 		/*
3034 		 * Pipe A power well is the new disp2d well. Pipe B and C
3035 		 * power wells don't actually exist. Pipe A power well is
3036 		 * required for any pipe to work.
3037 		 */
3038 		.domains = CHV_DISPLAY_POWER_DOMAINS,
3039 		.ops = &chv_pipe_power_well_ops,
3040 		.id = DISP_PW_ID_NONE,
3041 	},
3042 	{
3043 		.name = "dpio-common-bc",
3044 		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3045 		.ops = &chv_dpio_cmn_power_well_ops,
3046 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3047 		{
3048 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3049 		},
3050 	},
3051 	{
3052 		.name = "dpio-common-d",
3053 		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3054 		.ops = &chv_dpio_cmn_power_well_ops,
3055 		.id = CHV_DISP_PW_DPIO_CMN_D,
3056 		{
3057 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3058 		},
3059 	},
3060 };
3061 
3062 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3063 					 enum i915_power_well_id power_well_id)
3064 {
3065 	struct i915_power_well *power_well;
3066 	bool ret;
3067 
3068 	power_well = lookup_power_well(dev_priv, power_well_id);
3069 	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3070 
3071 	return ret;
3072 }
3073 
3074 static const struct i915_power_well_desc skl_power_wells[] = {
3075 	{
3076 		.name = "always-on",
3077 		.always_on = true,
3078 		.domains = POWER_DOMAIN_MASK,
3079 		.ops = &i9xx_always_on_power_well_ops,
3080 		.id = DISP_PW_ID_NONE,
3081 	},
3082 	{
3083 		.name = "power well 1",
3084 		/* Handled by the DMC firmware */
3085 		.always_on = true,
3086 		.domains = 0,
3087 		.ops = &hsw_power_well_ops,
3088 		.id = SKL_DISP_PW_1,
3089 		{
3090 			.hsw.regs = &hsw_power_well_regs,
3091 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3092 			.hsw.has_fuses = true,
3093 		},
3094 	},
3095 	{
3096 		.name = "MISC IO power well",
3097 		/* Handled by the DMC firmware */
3098 		.always_on = true,
3099 		.domains = 0,
3100 		.ops = &hsw_power_well_ops,
3101 		.id = SKL_DISP_PW_MISC_IO,
3102 		{
3103 			.hsw.regs = &hsw_power_well_regs,
3104 			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3105 		},
3106 	},
3107 	{
3108 		.name = "DC off",
3109 		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3110 		.ops = &gen9_dc_off_power_well_ops,
3111 		.id = SKL_DISP_DC_OFF,
3112 	},
3113 	{
3114 		.name = "power well 2",
3115 		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3116 		.ops = &hsw_power_well_ops,
3117 		.id = SKL_DISP_PW_2,
3118 		{
3119 			.hsw.regs = &hsw_power_well_regs,
3120 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3121 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3122 			.hsw.has_vga = true,
3123 			.hsw.has_fuses = true,
3124 		},
3125 	},
3126 	{
3127 		.name = "DDI A/E IO power well",
3128 		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3129 		.ops = &hsw_power_well_ops,
3130 		.id = DISP_PW_ID_NONE,
3131 		{
3132 			.hsw.regs = &hsw_power_well_regs,
3133 			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3134 		},
3135 	},
3136 	{
3137 		.name = "DDI B IO power well",
3138 		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3139 		.ops = &hsw_power_well_ops,
3140 		.id = DISP_PW_ID_NONE,
3141 		{
3142 			.hsw.regs = &hsw_power_well_regs,
3143 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3144 		},
3145 	},
3146 	{
3147 		.name = "DDI C IO power well",
3148 		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3149 		.ops = &hsw_power_well_ops,
3150 		.id = DISP_PW_ID_NONE,
3151 		{
3152 			.hsw.regs = &hsw_power_well_regs,
3153 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3154 		},
3155 	},
3156 	{
3157 		.name = "DDI D IO power well",
3158 		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3159 		.ops = &hsw_power_well_ops,
3160 		.id = DISP_PW_ID_NONE,
3161 		{
3162 			.hsw.regs = &hsw_power_well_regs,
3163 			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3164 		},
3165 	},
3166 };
3167 
3168 static const struct i915_power_well_desc bxt_power_wells[] = {
3169 	{
3170 		.name = "always-on",
3171 		.always_on = true,
3172 		.domains = POWER_DOMAIN_MASK,
3173 		.ops = &i9xx_always_on_power_well_ops,
3174 		.id = DISP_PW_ID_NONE,
3175 	},
3176 	{
3177 		.name = "power well 1",
3178 		/* Handled by the DMC firmware */
3179 		.always_on = true,
3180 		.domains = 0,
3181 		.ops = &hsw_power_well_ops,
3182 		.id = SKL_DISP_PW_1,
3183 		{
3184 			.hsw.regs = &hsw_power_well_regs,
3185 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3186 			.hsw.has_fuses = true,
3187 		},
3188 	},
3189 	{
3190 		.name = "DC off",
3191 		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3192 		.ops = &gen9_dc_off_power_well_ops,
3193 		.id = SKL_DISP_DC_OFF,
3194 	},
3195 	{
3196 		.name = "power well 2",
3197 		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3198 		.ops = &hsw_power_well_ops,
3199 		.id = SKL_DISP_PW_2,
3200 		{
3201 			.hsw.regs = &hsw_power_well_regs,
3202 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3203 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3204 			.hsw.has_vga = true,
3205 			.hsw.has_fuses = true,
3206 		},
3207 	},
3208 	{
3209 		.name = "dpio-common-a",
3210 		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3211 		.ops = &bxt_dpio_cmn_power_well_ops,
3212 		.id = BXT_DISP_PW_DPIO_CMN_A,
3213 		{
3214 			.bxt.phy = DPIO_PHY1,
3215 		},
3216 	},
3217 	{
3218 		.name = "dpio-common-bc",
3219 		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3220 		.ops = &bxt_dpio_cmn_power_well_ops,
3221 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3222 		{
3223 			.bxt.phy = DPIO_PHY0,
3224 		},
3225 	},
3226 };
3227 
3228 static const struct i915_power_well_desc glk_power_wells[] = {
3229 	{
3230 		.name = "always-on",
3231 		.always_on = true,
3232 		.domains = POWER_DOMAIN_MASK,
3233 		.ops = &i9xx_always_on_power_well_ops,
3234 		.id = DISP_PW_ID_NONE,
3235 	},
3236 	{
3237 		.name = "power well 1",
3238 		/* Handled by the DMC firmware */
3239 		.always_on = true,
3240 		.domains = 0,
3241 		.ops = &hsw_power_well_ops,
3242 		.id = SKL_DISP_PW_1,
3243 		{
3244 			.hsw.regs = &hsw_power_well_regs,
3245 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3246 			.hsw.has_fuses = true,
3247 		},
3248 	},
3249 	{
3250 		.name = "DC off",
3251 		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3252 		.ops = &gen9_dc_off_power_well_ops,
3253 		.id = SKL_DISP_DC_OFF,
3254 	},
3255 	{
3256 		.name = "power well 2",
3257 		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3258 		.ops = &hsw_power_well_ops,
3259 		.id = SKL_DISP_PW_2,
3260 		{
3261 			.hsw.regs = &hsw_power_well_regs,
3262 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3263 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3264 			.hsw.has_vga = true,
3265 			.hsw.has_fuses = true,
3266 		},
3267 	},
3268 	{
3269 		.name = "dpio-common-a",
3270 		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3271 		.ops = &bxt_dpio_cmn_power_well_ops,
3272 		.id = BXT_DISP_PW_DPIO_CMN_A,
3273 		{
3274 			.bxt.phy = DPIO_PHY1,
3275 		},
3276 	},
3277 	{
3278 		.name = "dpio-common-b",
3279 		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3280 		.ops = &bxt_dpio_cmn_power_well_ops,
3281 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3282 		{
3283 			.bxt.phy = DPIO_PHY0,
3284 		},
3285 	},
3286 	{
3287 		.name = "dpio-common-c",
3288 		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3289 		.ops = &bxt_dpio_cmn_power_well_ops,
3290 		.id = GLK_DISP_PW_DPIO_CMN_C,
3291 		{
3292 			.bxt.phy = DPIO_PHY2,
3293 		},
3294 	},
3295 	{
3296 		.name = "AUX A",
3297 		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3298 		.ops = &hsw_power_well_ops,
3299 		.id = DISP_PW_ID_NONE,
3300 		{
3301 			.hsw.regs = &hsw_power_well_regs,
3302 			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3303 		},
3304 	},
3305 	{
3306 		.name = "AUX B",
3307 		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3308 		.ops = &hsw_power_well_ops,
3309 		.id = DISP_PW_ID_NONE,
3310 		{
3311 			.hsw.regs = &hsw_power_well_regs,
3312 			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3313 		},
3314 	},
3315 	{
3316 		.name = "AUX C",
3317 		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3318 		.ops = &hsw_power_well_ops,
3319 		.id = DISP_PW_ID_NONE,
3320 		{
3321 			.hsw.regs = &hsw_power_well_regs,
3322 			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3323 		},
3324 	},
3325 	{
3326 		.name = "DDI A IO power well",
3327 		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3328 		.ops = &hsw_power_well_ops,
3329 		.id = DISP_PW_ID_NONE,
3330 		{
3331 			.hsw.regs = &hsw_power_well_regs,
3332 			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3333 		},
3334 	},
3335 	{
3336 		.name = "DDI B IO power well",
3337 		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3338 		.ops = &hsw_power_well_ops,
3339 		.id = DISP_PW_ID_NONE,
3340 		{
3341 			.hsw.regs = &hsw_power_well_regs,
3342 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3343 		},
3344 	},
3345 	{
3346 		.name = "DDI C IO power well",
3347 		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3348 		.ops = &hsw_power_well_ops,
3349 		.id = DISP_PW_ID_NONE,
3350 		{
3351 			.hsw.regs = &hsw_power_well_regs,
3352 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3353 		},
3354 	},
3355 };
3356 
3357 static const struct i915_power_well_desc cnl_power_wells[] = {
3358 	{
3359 		.name = "always-on",
3360 		.always_on = true,
3361 		.domains = POWER_DOMAIN_MASK,
3362 		.ops = &i9xx_always_on_power_well_ops,
3363 		.id = DISP_PW_ID_NONE,
3364 	},
3365 	{
3366 		.name = "power well 1",
3367 		/* Handled by the DMC firmware */
3368 		.always_on = true,
3369 		.domains = 0,
3370 		.ops = &hsw_power_well_ops,
3371 		.id = SKL_DISP_PW_1,
3372 		{
3373 			.hsw.regs = &hsw_power_well_regs,
3374 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3375 			.hsw.has_fuses = true,
3376 		},
3377 	},
3378 	{
3379 		.name = "AUX A",
3380 		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3381 		.ops = &hsw_power_well_ops,
3382 		.id = DISP_PW_ID_NONE,
3383 		{
3384 			.hsw.regs = &hsw_power_well_regs,
3385 			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3386 		},
3387 	},
3388 	{
3389 		.name = "AUX B",
3390 		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3391 		.ops = &hsw_power_well_ops,
3392 		.id = DISP_PW_ID_NONE,
3393 		{
3394 			.hsw.regs = &hsw_power_well_regs,
3395 			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3396 		},
3397 	},
3398 	{
3399 		.name = "AUX C",
3400 		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3401 		.ops = &hsw_power_well_ops,
3402 		.id = DISP_PW_ID_NONE,
3403 		{
3404 			.hsw.regs = &hsw_power_well_regs,
3405 			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3406 		},
3407 	},
3408 	{
3409 		.name = "AUX D",
3410 		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3411 		.ops = &hsw_power_well_ops,
3412 		.id = DISP_PW_ID_NONE,
3413 		{
3414 			.hsw.regs = &hsw_power_well_regs,
3415 			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3416 		},
3417 	},
3418 	{
3419 		.name = "DC off",
3420 		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3421 		.ops = &gen9_dc_off_power_well_ops,
3422 		.id = SKL_DISP_DC_OFF,
3423 	},
3424 	{
3425 		.name = "power well 2",
3426 		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3427 		.ops = &hsw_power_well_ops,
3428 		.id = SKL_DISP_PW_2,
3429 		{
3430 			.hsw.regs = &hsw_power_well_regs,
3431 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3432 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3433 			.hsw.has_vga = true,
3434 			.hsw.has_fuses = true,
3435 		},
3436 	},
3437 	{
3438 		.name = "DDI A IO power well",
3439 		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3440 		.ops = &hsw_power_well_ops,
3441 		.id = DISP_PW_ID_NONE,
3442 		{
3443 			.hsw.regs = &hsw_power_well_regs,
3444 			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3445 		},
3446 	},
3447 	{
3448 		.name = "DDI B IO power well",
3449 		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3450 		.ops = &hsw_power_well_ops,
3451 		.id = DISP_PW_ID_NONE,
3452 		{
3453 			.hsw.regs = &hsw_power_well_regs,
3454 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3455 		},
3456 	},
3457 	{
3458 		.name = "DDI C IO power well",
3459 		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3460 		.ops = &hsw_power_well_ops,
3461 		.id = DISP_PW_ID_NONE,
3462 		{
3463 			.hsw.regs = &hsw_power_well_regs,
3464 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3465 		},
3466 	},
3467 	{
3468 		.name = "DDI D IO power well",
3469 		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3470 		.ops = &hsw_power_well_ops,
3471 		.id = DISP_PW_ID_NONE,
3472 		{
3473 			.hsw.regs = &hsw_power_well_regs,
3474 			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3475 		},
3476 	},
3477 	{
3478 		.name = "DDI F IO power well",
3479 		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3480 		.ops = &hsw_power_well_ops,
3481 		.id = DISP_PW_ID_NONE,
3482 		{
3483 			.hsw.regs = &hsw_power_well_regs,
3484 			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3485 		},
3486 	},
3487 	{
3488 		.name = "AUX F",
3489 		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3490 		.ops = &hsw_power_well_ops,
3491 		.id = DISP_PW_ID_NONE,
3492 		{
3493 			.hsw.regs = &hsw_power_well_regs,
3494 			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3495 		},
3496 	},
3497 };
3498 
3499 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3500 	.sync_hw = hsw_power_well_sync_hw,
3501 	.enable = icl_combo_phy_aux_power_well_enable,
3502 	.disable = icl_combo_phy_aux_power_well_disable,
3503 	.is_enabled = hsw_power_well_enabled,
3504 };
3505 
3506 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3507 	.sync_hw = hsw_power_well_sync_hw,
3508 	.enable = icl_tc_phy_aux_power_well_enable,
3509 	.disable = icl_tc_phy_aux_power_well_disable,
3510 	.is_enabled = hsw_power_well_enabled,
3511 };
3512 
3513 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3514 	.bios	= ICL_PWR_WELL_CTL_AUX1,
3515 	.driver	= ICL_PWR_WELL_CTL_AUX2,
3516 	.debug	= ICL_PWR_WELL_CTL_AUX4,
3517 };
3518 
3519 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3520 	.bios	= ICL_PWR_WELL_CTL_DDI1,
3521 	.driver	= ICL_PWR_WELL_CTL_DDI2,
3522 	.debug	= ICL_PWR_WELL_CTL_DDI4,
3523 };
3524 
3525 static const struct i915_power_well_desc icl_power_wells[] = {
3526 	{
3527 		.name = "always-on",
3528 		.always_on = true,
3529 		.domains = POWER_DOMAIN_MASK,
3530 		.ops = &i9xx_always_on_power_well_ops,
3531 		.id = DISP_PW_ID_NONE,
3532 	},
3533 	{
3534 		.name = "power well 1",
3535 		/* Handled by the DMC firmware */
3536 		.always_on = true,
3537 		.domains = 0,
3538 		.ops = &hsw_power_well_ops,
3539 		.id = SKL_DISP_PW_1,
3540 		{
3541 			.hsw.regs = &hsw_power_well_regs,
3542 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3543 			.hsw.has_fuses = true,
3544 		},
3545 	},
3546 	{
3547 		.name = "DC off",
3548 		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3549 		.ops = &gen9_dc_off_power_well_ops,
3550 		.id = SKL_DISP_DC_OFF,
3551 	},
3552 	{
3553 		.name = "power well 2",
3554 		.domains = ICL_PW_2_POWER_DOMAINS,
3555 		.ops = &hsw_power_well_ops,
3556 		.id = SKL_DISP_PW_2,
3557 		{
3558 			.hsw.regs = &hsw_power_well_regs,
3559 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3560 			.hsw.has_fuses = true,
3561 		},
3562 	},
3563 	{
3564 		.name = "power well 3",
3565 		.domains = ICL_PW_3_POWER_DOMAINS,
3566 		.ops = &hsw_power_well_ops,
3567 		.id = DISP_PW_ID_NONE,
3568 		{
3569 			.hsw.regs = &hsw_power_well_regs,
3570 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3571 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3572 			.hsw.has_vga = true,
3573 			.hsw.has_fuses = true,
3574 		},
3575 	},
3576 	{
3577 		.name = "DDI A IO",
3578 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3579 		.ops = &hsw_power_well_ops,
3580 		.id = DISP_PW_ID_NONE,
3581 		{
3582 			.hsw.regs = &icl_ddi_power_well_regs,
3583 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3584 		},
3585 	},
3586 	{
3587 		.name = "DDI B IO",
3588 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3589 		.ops = &hsw_power_well_ops,
3590 		.id = DISP_PW_ID_NONE,
3591 		{
3592 			.hsw.regs = &icl_ddi_power_well_regs,
3593 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3594 		},
3595 	},
3596 	{
3597 		.name = "DDI C IO",
3598 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3599 		.ops = &hsw_power_well_ops,
3600 		.id = DISP_PW_ID_NONE,
3601 		{
3602 			.hsw.regs = &icl_ddi_power_well_regs,
3603 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3604 		},
3605 	},
3606 	{
3607 		.name = "DDI D IO",
3608 		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3609 		.ops = &hsw_power_well_ops,
3610 		.id = DISP_PW_ID_NONE,
3611 		{
3612 			.hsw.regs = &icl_ddi_power_well_regs,
3613 			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3614 		},
3615 	},
3616 	{
3617 		.name = "DDI E IO",
3618 		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
3619 		.ops = &hsw_power_well_ops,
3620 		.id = DISP_PW_ID_NONE,
3621 		{
3622 			.hsw.regs = &icl_ddi_power_well_regs,
3623 			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3624 		},
3625 	},
3626 	{
3627 		.name = "DDI F IO",
3628 		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
3629 		.ops = &hsw_power_well_ops,
3630 		.id = DISP_PW_ID_NONE,
3631 		{
3632 			.hsw.regs = &icl_ddi_power_well_regs,
3633 			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3634 		},
3635 	},
3636 	{
3637 		.name = "AUX A",
3638 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3639 		.ops = &icl_combo_phy_aux_power_well_ops,
3640 		.id = DISP_PW_ID_NONE,
3641 		{
3642 			.hsw.regs = &icl_aux_power_well_regs,
3643 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3644 		},
3645 	},
3646 	{
3647 		.name = "AUX B",
3648 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3649 		.ops = &icl_combo_phy_aux_power_well_ops,
3650 		.id = DISP_PW_ID_NONE,
3651 		{
3652 			.hsw.regs = &icl_aux_power_well_regs,
3653 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3654 		},
3655 	},
3656 	{
3657 		.name = "AUX C TC1",
3658 		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3659 		.ops = &icl_tc_phy_aux_power_well_ops,
3660 		.id = DISP_PW_ID_NONE,
3661 		{
3662 			.hsw.regs = &icl_aux_power_well_regs,
3663 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3664 			.hsw.is_tc_tbt = false,
3665 		},
3666 	},
3667 	{
3668 		.name = "AUX D TC2",
3669 		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3670 		.ops = &icl_tc_phy_aux_power_well_ops,
3671 		.id = DISP_PW_ID_NONE,
3672 		{
3673 			.hsw.regs = &icl_aux_power_well_regs,
3674 			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3675 			.hsw.is_tc_tbt = false,
3676 		},
3677 	},
3678 	{
3679 		.name = "AUX E TC3",
3680 		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3681 		.ops = &icl_tc_phy_aux_power_well_ops,
3682 		.id = DISP_PW_ID_NONE,
3683 		{
3684 			.hsw.regs = &icl_aux_power_well_regs,
3685 			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3686 			.hsw.is_tc_tbt = false,
3687 		},
3688 	},
3689 	{
3690 		.name = "AUX F TC4",
3691 		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3692 		.ops = &icl_tc_phy_aux_power_well_ops,
3693 		.id = DISP_PW_ID_NONE,
3694 		{
3695 			.hsw.regs = &icl_aux_power_well_regs,
3696 			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3697 			.hsw.is_tc_tbt = false,
3698 		},
3699 	},
3700 	{
3701 		.name = "AUX C TBT1",
3702 		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3703 		.ops = &icl_tc_phy_aux_power_well_ops,
3704 		.id = DISP_PW_ID_NONE,
3705 		{
3706 			.hsw.regs = &icl_aux_power_well_regs,
3707 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3708 			.hsw.is_tc_tbt = true,
3709 		},
3710 	},
3711 	{
3712 		.name = "AUX D TBT2",
3713 		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3714 		.ops = &icl_tc_phy_aux_power_well_ops,
3715 		.id = DISP_PW_ID_NONE,
3716 		{
3717 			.hsw.regs = &icl_aux_power_well_regs,
3718 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3719 			.hsw.is_tc_tbt = true,
3720 		},
3721 	},
3722 	{
3723 		.name = "AUX E TBT3",
3724 		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3725 		.ops = &icl_tc_phy_aux_power_well_ops,
3726 		.id = DISP_PW_ID_NONE,
3727 		{
3728 			.hsw.regs = &icl_aux_power_well_regs,
3729 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3730 			.hsw.is_tc_tbt = true,
3731 		},
3732 	},
3733 	{
3734 		.name = "AUX F TBT4",
3735 		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3736 		.ops = &icl_tc_phy_aux_power_well_ops,
3737 		.id = DISP_PW_ID_NONE,
3738 		{
3739 			.hsw.regs = &icl_aux_power_well_regs,
3740 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3741 			.hsw.is_tc_tbt = true,
3742 		},
3743 	},
3744 	{
3745 		.name = "power well 4",
3746 		.domains = ICL_PW_4_POWER_DOMAINS,
3747 		.ops = &hsw_power_well_ops,
3748 		.id = DISP_PW_ID_NONE,
3749 		{
3750 			.hsw.regs = &hsw_power_well_regs,
3751 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3752 			.hsw.has_fuses = true,
3753 			.hsw.irq_pipe_mask = BIT(PIPE_C),
3754 		},
3755 	},
3756 };
3757 
3758 static const struct i915_power_well_desc ehl_power_wells[] = {
3759 	{
3760 		.name = "always-on",
3761 		.always_on = true,
3762 		.domains = POWER_DOMAIN_MASK,
3763 		.ops = &i9xx_always_on_power_well_ops,
3764 		.id = DISP_PW_ID_NONE,
3765 	},
3766 	{
3767 		.name = "power well 1",
3768 		/* Handled by the DMC firmware */
3769 		.always_on = true,
3770 		.domains = 0,
3771 		.ops = &hsw_power_well_ops,
3772 		.id = SKL_DISP_PW_1,
3773 		{
3774 			.hsw.regs = &hsw_power_well_regs,
3775 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3776 			.hsw.has_fuses = true,
3777 		},
3778 	},
3779 	{
3780 		.name = "DC off",
3781 		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3782 		.ops = &gen9_dc_off_power_well_ops,
3783 		.id = SKL_DISP_DC_OFF,
3784 	},
3785 	{
3786 		.name = "power well 2",
3787 		.domains = ICL_PW_2_POWER_DOMAINS,
3788 		.ops = &hsw_power_well_ops,
3789 		.id = SKL_DISP_PW_2,
3790 		{
3791 			.hsw.regs = &hsw_power_well_regs,
3792 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3793 			.hsw.has_fuses = true,
3794 		},
3795 	},
3796 	{
3797 		.name = "power well 3",
3798 		.domains = ICL_PW_3_POWER_DOMAINS,
3799 		.ops = &hsw_power_well_ops,
3800 		.id = DISP_PW_ID_NONE,
3801 		{
3802 			.hsw.regs = &hsw_power_well_regs,
3803 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3804 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3805 			.hsw.has_vga = true,
3806 			.hsw.has_fuses = true,
3807 		},
3808 	},
3809 	{
3810 		.name = "DDI A IO",
3811 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3812 		.ops = &hsw_power_well_ops,
3813 		.id = DISP_PW_ID_NONE,
3814 		{
3815 			.hsw.regs = &icl_ddi_power_well_regs,
3816 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3817 		},
3818 	},
3819 	{
3820 		.name = "DDI B IO",
3821 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3822 		.ops = &hsw_power_well_ops,
3823 		.id = DISP_PW_ID_NONE,
3824 		{
3825 			.hsw.regs = &icl_ddi_power_well_regs,
3826 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3827 		},
3828 	},
3829 	{
3830 		.name = "DDI C IO",
3831 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3832 		.ops = &hsw_power_well_ops,
3833 		.id = DISP_PW_ID_NONE,
3834 		{
3835 			.hsw.regs = &icl_ddi_power_well_regs,
3836 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3837 		},
3838 	},
3839 	{
3840 		.name = "DDI D IO",
3841 		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3842 		.ops = &hsw_power_well_ops,
3843 		.id = DISP_PW_ID_NONE,
3844 		{
3845 			.hsw.regs = &icl_ddi_power_well_regs,
3846 			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3847 		},
3848 	},
3849 	{
3850 		.name = "AUX A",
3851 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3852 		.ops = &hsw_power_well_ops,
3853 		.id = DISP_PW_ID_NONE,
3854 		{
3855 			.hsw.regs = &icl_aux_power_well_regs,
3856 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3857 		},
3858 	},
3859 	{
3860 		.name = "AUX B",
3861 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3862 		.ops = &hsw_power_well_ops,
3863 		.id = DISP_PW_ID_NONE,
3864 		{
3865 			.hsw.regs = &icl_aux_power_well_regs,
3866 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3867 		},
3868 	},
3869 	{
3870 		.name = "AUX C",
3871 		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3872 		.ops = &hsw_power_well_ops,
3873 		.id = DISP_PW_ID_NONE,
3874 		{
3875 			.hsw.regs = &icl_aux_power_well_regs,
3876 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3877 		},
3878 	},
3879 	{
3880 		.name = "AUX D",
3881 		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3882 		.ops = &hsw_power_well_ops,
3883 		.id = DISP_PW_ID_NONE,
3884 		{
3885 			.hsw.regs = &icl_aux_power_well_regs,
3886 			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3887 		},
3888 	},
3889 	{
3890 		.name = "power well 4",
3891 		.domains = ICL_PW_4_POWER_DOMAINS,
3892 		.ops = &hsw_power_well_ops,
3893 		.id = DISP_PW_ID_NONE,
3894 		{
3895 			.hsw.regs = &hsw_power_well_regs,
3896 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3897 			.hsw.has_fuses = true,
3898 			.hsw.irq_pipe_mask = BIT(PIPE_C),
3899 		},
3900 	},
3901 };
3902 
3903 static const struct i915_power_well_desc tgl_power_wells[] = {
3904 	{
3905 		.name = "always-on",
3906 		.always_on = true,
3907 		.domains = POWER_DOMAIN_MASK,
3908 		.ops = &i9xx_always_on_power_well_ops,
3909 		.id = DISP_PW_ID_NONE,
3910 	},
3911 	{
3912 		.name = "power well 1",
3913 		/* Handled by the DMC firmware */
3914 		.always_on = true,
3915 		.domains = 0,
3916 		.ops = &hsw_power_well_ops,
3917 		.id = SKL_DISP_PW_1,
3918 		{
3919 			.hsw.regs = &hsw_power_well_regs,
3920 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3921 			.hsw.has_fuses = true,
3922 		},
3923 	},
3924 	{
3925 		.name = "DC off",
3926 		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
3927 		.ops = &gen9_dc_off_power_well_ops,
3928 		.id = SKL_DISP_DC_OFF,
3929 	},
3930 	{
3931 		.name = "power well 2",
3932 		.domains = TGL_PW_2_POWER_DOMAINS,
3933 		.ops = &hsw_power_well_ops,
3934 		.id = SKL_DISP_PW_2,
3935 		{
3936 			.hsw.regs = &hsw_power_well_regs,
3937 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3938 			.hsw.has_fuses = true,
3939 		},
3940 	},
3941 	{
3942 		.name = "power well 3",
3943 		.domains = TGL_PW_3_POWER_DOMAINS,
3944 		.ops = &hsw_power_well_ops,
3945 		.id = TGL_DISP_PW_3,
3946 		{
3947 			.hsw.regs = &hsw_power_well_regs,
3948 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3949 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3950 			.hsw.has_vga = true,
3951 			.hsw.has_fuses = true,
3952 		},
3953 	},
3954 	{
3955 		.name = "DDI A IO",
3956 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3957 		.ops = &hsw_power_well_ops,
3958 		.id = DISP_PW_ID_NONE,
3959 		{
3960 			.hsw.regs = &icl_ddi_power_well_regs,
3961 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3962 		}
3963 	},
3964 	{
3965 		.name = "DDI B IO",
3966 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3967 		.ops = &hsw_power_well_ops,
3968 		.id = DISP_PW_ID_NONE,
3969 		{
3970 			.hsw.regs = &icl_ddi_power_well_regs,
3971 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3972 		}
3973 	},
3974 	{
3975 		.name = "DDI C IO",
3976 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3977 		.ops = &hsw_power_well_ops,
3978 		.id = DISP_PW_ID_NONE,
3979 		{
3980 			.hsw.regs = &icl_ddi_power_well_regs,
3981 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3982 		}
3983 	},
3984 	{
3985 		.name = "DDI D TC1 IO",
3986 		.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
3987 		.ops = &hsw_power_well_ops,
3988 		.id = DISP_PW_ID_NONE,
3989 		{
3990 			.hsw.regs = &icl_ddi_power_well_regs,
3991 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
3992 		},
3993 	},
3994 	{
3995 		.name = "DDI E TC2 IO",
3996 		.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
3997 		.ops = &hsw_power_well_ops,
3998 		.id = DISP_PW_ID_NONE,
3999 		{
4000 			.hsw.regs = &icl_ddi_power_well_regs,
4001 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4002 		},
4003 	},
4004 	{
4005 		.name = "DDI F TC3 IO",
4006 		.domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
4007 		.ops = &hsw_power_well_ops,
4008 		.id = DISP_PW_ID_NONE,
4009 		{
4010 			.hsw.regs = &icl_ddi_power_well_regs,
4011 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4012 		},
4013 	},
4014 	{
4015 		.name = "DDI G TC4 IO",
4016 		.domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
4017 		.ops = &hsw_power_well_ops,
4018 		.id = DISP_PW_ID_NONE,
4019 		{
4020 			.hsw.regs = &icl_ddi_power_well_regs,
4021 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4022 		},
4023 	},
4024 	{
4025 		.name = "DDI H TC5 IO",
4026 		.domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
4027 		.ops = &hsw_power_well_ops,
4028 		.id = DISP_PW_ID_NONE,
4029 		{
4030 			.hsw.regs = &icl_ddi_power_well_regs,
4031 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4032 		},
4033 	},
4034 	{
4035 		.name = "DDI I TC6 IO",
4036 		.domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
4037 		.ops = &hsw_power_well_ops,
4038 		.id = DISP_PW_ID_NONE,
4039 		{
4040 			.hsw.regs = &icl_ddi_power_well_regs,
4041 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4042 		},
4043 	},
4044 	{
4045 		.name = "AUX A",
4046 		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
4047 		.ops = &hsw_power_well_ops,
4048 		.id = DISP_PW_ID_NONE,
4049 		{
4050 			.hsw.regs = &icl_aux_power_well_regs,
4051 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4052 		},
4053 	},
4054 	{
4055 		.name = "AUX B",
4056 		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
4057 		.ops = &hsw_power_well_ops,
4058 		.id = DISP_PW_ID_NONE,
4059 		{
4060 			.hsw.regs = &icl_aux_power_well_regs,
4061 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4062 		},
4063 	},
4064 	{
4065 		.name = "AUX C",
4066 		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
4067 		.ops = &hsw_power_well_ops,
4068 		.id = DISP_PW_ID_NONE,
4069 		{
4070 			.hsw.regs = &icl_aux_power_well_regs,
4071 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4072 		},
4073 	},
4074 	{
4075 		.name = "AUX D TC1",
4076 		.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
4077 		.ops = &icl_tc_phy_aux_power_well_ops,
4078 		.id = DISP_PW_ID_NONE,
4079 		{
4080 			.hsw.regs = &icl_aux_power_well_regs,
4081 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4082 			.hsw.is_tc_tbt = false,
4083 		},
4084 	},
4085 	{
4086 		.name = "AUX E TC2",
4087 		.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
4088 		.ops = &icl_tc_phy_aux_power_well_ops,
4089 		.id = DISP_PW_ID_NONE,
4090 		{
4091 			.hsw.regs = &icl_aux_power_well_regs,
4092 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4093 			.hsw.is_tc_tbt = false,
4094 		},
4095 	},
4096 	{
4097 		.name = "AUX F TC3",
4098 		.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
4099 		.ops = &icl_tc_phy_aux_power_well_ops,
4100 		.id = DISP_PW_ID_NONE,
4101 		{
4102 			.hsw.regs = &icl_aux_power_well_regs,
4103 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4104 			.hsw.is_tc_tbt = false,
4105 		},
4106 	},
4107 	{
4108 		.name = "AUX G TC4",
4109 		.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
4110 		.ops = &icl_tc_phy_aux_power_well_ops,
4111 		.id = DISP_PW_ID_NONE,
4112 		{
4113 			.hsw.regs = &icl_aux_power_well_regs,
4114 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4115 			.hsw.is_tc_tbt = false,
4116 		},
4117 	},
4118 	{
4119 		.name = "AUX H TC5",
4120 		.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
4121 		.ops = &icl_tc_phy_aux_power_well_ops,
4122 		.id = DISP_PW_ID_NONE,
4123 		{
4124 			.hsw.regs = &icl_aux_power_well_regs,
4125 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4126 			.hsw.is_tc_tbt = false,
4127 		},
4128 	},
4129 	{
4130 		.name = "AUX I TC6",
4131 		.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
4132 		.ops = &icl_tc_phy_aux_power_well_ops,
4133 		.id = DISP_PW_ID_NONE,
4134 		{
4135 			.hsw.regs = &icl_aux_power_well_regs,
4136 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4137 			.hsw.is_tc_tbt = false,
4138 		},
4139 	},
4140 	{
4141 		.name = "AUX D TBT1",
4142 		.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
4143 		.ops = &hsw_power_well_ops,
4144 		.id = DISP_PW_ID_NONE,
4145 		{
4146 			.hsw.regs = &icl_aux_power_well_regs,
4147 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4148 			.hsw.is_tc_tbt = true,
4149 		},
4150 	},
4151 	{
4152 		.name = "AUX E TBT2",
4153 		.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
4154 		.ops = &hsw_power_well_ops,
4155 		.id = DISP_PW_ID_NONE,
4156 		{
4157 			.hsw.regs = &icl_aux_power_well_regs,
4158 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4159 			.hsw.is_tc_tbt = true,
4160 		},
4161 	},
4162 	{
4163 		.name = "AUX F TBT3",
4164 		.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
4165 		.ops = &hsw_power_well_ops,
4166 		.id = DISP_PW_ID_NONE,
4167 		{
4168 			.hsw.regs = &icl_aux_power_well_regs,
4169 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4170 			.hsw.is_tc_tbt = true,
4171 		},
4172 	},
4173 	{
4174 		.name = "AUX G TBT4",
4175 		.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
4176 		.ops = &hsw_power_well_ops,
4177 		.id = DISP_PW_ID_NONE,
4178 		{
4179 			.hsw.regs = &icl_aux_power_well_regs,
4180 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4181 			.hsw.is_tc_tbt = true,
4182 		},
4183 	},
4184 	{
4185 		.name = "AUX H TBT5",
4186 		.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
4187 		.ops = &hsw_power_well_ops,
4188 		.id = DISP_PW_ID_NONE,
4189 		{
4190 			.hsw.regs = &icl_aux_power_well_regs,
4191 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4192 			.hsw.is_tc_tbt = true,
4193 		},
4194 	},
4195 	{
4196 		.name = "AUX I TBT6",
4197 		.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
4198 		.ops = &hsw_power_well_ops,
4199 		.id = DISP_PW_ID_NONE,
4200 		{
4201 			.hsw.regs = &icl_aux_power_well_regs,
4202 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4203 			.hsw.is_tc_tbt = true,
4204 		},
4205 	},
4206 	{
4207 		.name = "power well 4",
4208 		.domains = TGL_PW_4_POWER_DOMAINS,
4209 		.ops = &hsw_power_well_ops,
4210 		.id = DISP_PW_ID_NONE,
4211 		{
4212 			.hsw.regs = &hsw_power_well_regs,
4213 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4214 			.hsw.has_fuses = true,
4215 			.hsw.irq_pipe_mask = BIT(PIPE_C),
4216 		}
4217 	},
4218 	{
4219 		.name = "power well 5",
4220 		.domains = TGL_PW_5_POWER_DOMAINS,
4221 		.ops = &hsw_power_well_ops,
4222 		.id = DISP_PW_ID_NONE,
4223 		{
4224 			.hsw.regs = &hsw_power_well_regs,
4225 			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
4226 			.hsw.has_fuses = true,
4227 			.hsw.irq_pipe_mask = BIT(PIPE_D),
4228 		},
4229 	},
4230 };
4231 
4232 static int
4233 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
4234 				   int disable_power_well)
4235 {
4236 	if (disable_power_well >= 0)
4237 		return !!disable_power_well;
4238 
4239 	return 1;
4240 }
4241 
4242 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4243 			       int enable_dc)
4244 {
4245 	u32 mask;
4246 	int requested_dc;
4247 	int max_dc;
4248 
4249 	if (INTEL_GEN(dev_priv) >= 12) {
4250 		max_dc = 4;
4251 		/*
4252 		 * DC9 has a separate HW flow from the rest of the DC states,
4253 		 * not depending on the DMC firmware. It's needed by system
4254 		 * suspend/resume, so allow it unconditionally.
4255 		 */
4256 		mask = DC_STATE_EN_DC9;
4257 	} else if (IS_GEN(dev_priv, 11)) {
4258 		max_dc = 2;
4259 		mask = DC_STATE_EN_DC9;
4260 	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
4261 		max_dc = 2;
4262 		mask = 0;
4263 	} else if (IS_GEN9_LP(dev_priv)) {
4264 		max_dc = 1;
4265 		mask = DC_STATE_EN_DC9;
4266 	} else {
4267 		max_dc = 0;
4268 		mask = 0;
4269 	}
4270 
4271 	if (!i915_modparams.disable_power_well)
4272 		max_dc = 0;
4273 
4274 	if (enable_dc >= 0 && enable_dc <= max_dc) {
4275 		requested_dc = enable_dc;
4276 	} else if (enable_dc == -1) {
4277 		requested_dc = max_dc;
4278 	} else if (enable_dc > max_dc && enable_dc <= 4) {
4279 		drm_dbg_kms(&dev_priv->drm,
4280 			    "Adjusting requested max DC state (%d->%d)\n",
4281 			    enable_dc, max_dc);
4282 		requested_dc = max_dc;
4283 	} else {
4284 		drm_err(&dev_priv->drm,
4285 			"Unexpected value for enable_dc (%d)\n", enable_dc);
4286 		requested_dc = max_dc;
4287 	}
4288 
4289 	switch (requested_dc) {
4290 	case 4:
4291 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
4292 		break;
4293 	case 3:
4294 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
4295 		break;
4296 	case 2:
4297 		mask |= DC_STATE_EN_UPTO_DC6;
4298 		break;
4299 	case 1:
4300 		mask |= DC_STATE_EN_UPTO_DC5;
4301 		break;
4302 	}
4303 
4304 	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
4305 
4306 	return mask;
4307 }
4308 
4309 static int
4310 __set_power_wells(struct i915_power_domains *power_domains,
4311 		  const struct i915_power_well_desc *power_well_descs,
4312 		  int power_well_count)
4313 {
4314 	u64 power_well_ids = 0;
4315 	int i;
4316 
4317 	power_domains->power_well_count = power_well_count;
4318 	power_domains->power_wells =
4319 				kcalloc(power_well_count,
4320 					sizeof(*power_domains->power_wells),
4321 					GFP_KERNEL);
4322 	if (!power_domains->power_wells)
4323 		return -ENOMEM;
4324 
4325 	for (i = 0; i < power_well_count; i++) {
4326 		enum i915_power_well_id id = power_well_descs[i].id;
4327 
4328 		power_domains->power_wells[i].desc = &power_well_descs[i];
4329 
4330 		if (id == DISP_PW_ID_NONE)
4331 			continue;
4332 
4333 		WARN_ON(id >= sizeof(power_well_ids) * 8);
4334 		WARN_ON(power_well_ids & BIT_ULL(id));
4335 		power_well_ids |= BIT_ULL(id);
4336 	}
4337 
4338 	return 0;
4339 }
4340 
4341 #define set_power_wells(power_domains, __power_well_descs) \
4342 	__set_power_wells(power_domains, __power_well_descs, \
4343 			  ARRAY_SIZE(__power_well_descs))
4344 
4345 /**
4346  * intel_power_domains_init - initializes the power domain structures
4347  * @dev_priv: i915 device instance
4348  *
4349  * Initializes the power domain structures for @dev_priv depending upon the
4350  * supported platform.
4351  */
4352 int intel_power_domains_init(struct drm_i915_private *dev_priv)
4353 {
4354 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4355 	int err;
4356 
4357 	i915_modparams.disable_power_well =
4358 		sanitize_disable_power_well_option(dev_priv,
4359 						   i915_modparams.disable_power_well);
4360 	dev_priv->csr.allowed_dc_mask =
4361 		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
4362 
4363 	dev_priv->csr.target_dc_state =
4364 		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
4365 
4366 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4367 
4368 	mutex_init(&power_domains->lock);
4369 
4370 	INIT_DELAYED_WORK(&power_domains->async_put_work,
4371 			  intel_display_power_put_async_work);
4372 
4373 	/*
4374 	 * The enabling order will be from lower to higher indexed wells,
4375 	 * the disabling order is reversed.
4376 	 */
4377 	if (IS_GEN(dev_priv, 12)) {
4378 		err = set_power_wells(power_domains, tgl_power_wells);
4379 	} else if (IS_ELKHARTLAKE(dev_priv)) {
4380 		err = set_power_wells(power_domains, ehl_power_wells);
4381 	} else if (IS_GEN(dev_priv, 11)) {
4382 		err = set_power_wells(power_domains, icl_power_wells);
4383 	} else if (IS_CANNONLAKE(dev_priv)) {
4384 		err = set_power_wells(power_domains, cnl_power_wells);
4385 
4386 		/*
4387 		 * DDI and Aux IO are getting enabled for all ports
4388 		 * regardless the presence or use. So, in order to avoid
4389 		 * timeouts, lets remove them from the list
4390 		 * for the SKUs without port F.
4391 		 */
4392 		if (!IS_CNL_WITH_PORT_F(dev_priv))
4393 			power_domains->power_well_count -= 2;
4394 	} else if (IS_GEMINILAKE(dev_priv)) {
4395 		err = set_power_wells(power_domains, glk_power_wells);
4396 	} else if (IS_BROXTON(dev_priv)) {
4397 		err = set_power_wells(power_domains, bxt_power_wells);
4398 	} else if (IS_GEN9_BC(dev_priv)) {
4399 		err = set_power_wells(power_domains, skl_power_wells);
4400 	} else if (IS_CHERRYVIEW(dev_priv)) {
4401 		err = set_power_wells(power_domains, chv_power_wells);
4402 	} else if (IS_BROADWELL(dev_priv)) {
4403 		err = set_power_wells(power_domains, bdw_power_wells);
4404 	} else if (IS_HASWELL(dev_priv)) {
4405 		err = set_power_wells(power_domains, hsw_power_wells);
4406 	} else if (IS_VALLEYVIEW(dev_priv)) {
4407 		err = set_power_wells(power_domains, vlv_power_wells);
4408 	} else if (IS_I830(dev_priv)) {
4409 		err = set_power_wells(power_domains, i830_power_wells);
4410 	} else {
4411 		err = set_power_wells(power_domains, i9xx_always_on_power_well);
4412 	}
4413 
4414 	return err;
4415 }
4416 
4417 /**
4418  * intel_power_domains_cleanup - clean up power domains resources
4419  * @dev_priv: i915 device instance
4420  *
4421  * Release any resources acquired by intel_power_domains_init()
4422  */
4423 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4424 {
4425 	kfree(dev_priv->power_domains.power_wells);
4426 }
4427 
4428 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4429 {
4430 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4431 	struct i915_power_well *power_well;
4432 
4433 	mutex_lock(&power_domains->lock);
4434 	for_each_power_well(dev_priv, power_well) {
4435 		power_well->desc->ops->sync_hw(dev_priv, power_well);
4436 		power_well->hw_enabled =
4437 			power_well->desc->ops->is_enabled(dev_priv, power_well);
4438 	}
4439 	mutex_unlock(&power_domains->lock);
4440 }
4441 
4442 static inline
4443 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4444 			  i915_reg_t reg, bool enable)
4445 {
4446 	u32 val, status;
4447 
4448 	val = intel_de_read(dev_priv, reg);
4449 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4450 	intel_de_write(dev_priv, reg, val);
4451 	intel_de_posting_read(dev_priv, reg);
4452 	udelay(10);
4453 
4454 	status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
4455 	if ((enable && !status) || (!enable && status)) {
4456 		drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
4457 			enable ? "enable" : "disable");
4458 		return false;
4459 	}
4460 	return true;
4461 }
4462 
4463 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
4464 {
4465 	icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
4466 }
4467 
4468 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
4469 {
4470 	icl_dbuf_slices_update(dev_priv, 0);
4471 }
4472 
4473 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
4474 			    u8 req_slices)
4475 {
4476 	int i;
4477 	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
4478 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4479 
4480 	drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
4481 		 "Invalid number of dbuf slices requested\n");
4482 
4483 	DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
4484 
4485 	/*
4486 	 * Might be running this in parallel to gen9_dc_off_power_well_enable
4487 	 * being called from intel_dp_detect for instance,
4488 	 * which causes assertion triggered by race condition,
4489 	 * as gen9_assert_dbuf_enabled might preempt this when registers
4490 	 * were already updated, while dev_priv was not.
4491 	 */
4492 	mutex_lock(&power_domains->lock);
4493 
4494 	for (i = 0; i < max_slices; i++) {
4495 		intel_dbuf_slice_set(dev_priv,
4496 				     DBUF_CTL_S(i),
4497 				     (req_slices & BIT(i)) != 0);
4498 	}
4499 
4500 	dev_priv->enabled_dbuf_slices_mask = req_slices;
4501 
4502 	mutex_unlock(&power_domains->lock);
4503 }
4504 
4505 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
4506 {
4507 	skl_ddb_get_hw_state(dev_priv);
4508 	/*
4509 	 * Just power up at least 1 slice, we will
4510 	 * figure out later which slices we have and what we need.
4511 	 */
4512 	icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
4513 			       BIT(DBUF_S1));
4514 }
4515 
4516 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
4517 {
4518 	icl_dbuf_slices_update(dev_priv, 0);
4519 }
4520 
4521 static void icl_mbus_init(struct drm_i915_private *dev_priv)
4522 {
4523 	u32 mask, val;
4524 
4525 	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
4526 		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
4527 		MBUS_ABOX_B_CREDIT_MASK |
4528 		MBUS_ABOX_BW_CREDIT_MASK;
4529 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4530 		MBUS_ABOX_BT_CREDIT_POOL2(16) |
4531 		MBUS_ABOX_B_CREDIT(1) |
4532 		MBUS_ABOX_BW_CREDIT(1);
4533 
4534 	intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
4535 	if (INTEL_GEN(dev_priv) >= 12) {
4536 		intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
4537 		intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
4538 	}
4539 }
4540 
4541 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4542 {
4543 	u32 val = intel_de_read(dev_priv, LCPLL_CTL);
4544 
4545 	/*
4546 	 * The LCPLL register should be turned on by the BIOS. For now
4547 	 * let's just check its state and print errors in case
4548 	 * something is wrong.  Don't even try to turn it on.
4549 	 */
4550 
4551 	if (val & LCPLL_CD_SOURCE_FCLK)
4552 		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
4553 
4554 	if (val & LCPLL_PLL_DISABLE)
4555 		drm_err(&dev_priv->drm, "LCPLL is disabled\n");
4556 
4557 	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
4558 		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
4559 }
4560 
4561 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
4562 {
4563 	struct drm_device *dev = &dev_priv->drm;
4564 	struct intel_crtc *crtc;
4565 
4566 	for_each_intel_crtc(dev, crtc)
4567 		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4568 				pipe_name(crtc->pipe));
4569 
4570 	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
4571 			"Display power well on\n");
4572 	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
4573 			"SPLL enabled\n");
4574 	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4575 			"WRPLL1 enabled\n");
4576 	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4577 			"WRPLL2 enabled\n");
4578 	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
4579 			"Panel power on\n");
4580 	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4581 			"CPU PWM1 enabled\n");
4582 	if (IS_HASWELL(dev_priv))
4583 		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4584 				"CPU PWM2 enabled\n");
4585 	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4586 			"PCH PWM1 enabled\n");
4587 	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4588 			"Utility pin enabled\n");
4589 	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
4590 			"PCH GTC enabled\n");
4591 
4592 	/*
4593 	 * In theory we can still leave IRQs enabled, as long as only the HPD
4594 	 * interrupts remain enabled. We used to check for that, but since it's
4595 	 * gen-specific and since we only disable LCPLL after we fully disable
4596 	 * the interrupts, the check below should be enough.
4597 	 */
4598 	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4599 }
4600 
4601 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4602 {
4603 	if (IS_HASWELL(dev_priv))
4604 		return intel_de_read(dev_priv, D_COMP_HSW);
4605 	else
4606 		return intel_de_read(dev_priv, D_COMP_BDW);
4607 }
4608 
4609 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4610 {
4611 	if (IS_HASWELL(dev_priv)) {
4612 		if (sandybridge_pcode_write(dev_priv,
4613 					    GEN6_PCODE_WRITE_D_COMP, val))
4614 			drm_dbg_kms(&dev_priv->drm,
4615 				    "Failed to write to D_COMP\n");
4616 	} else {
4617 		intel_de_write(dev_priv, D_COMP_BDW, val);
4618 		intel_de_posting_read(dev_priv, D_COMP_BDW);
4619 	}
4620 }
4621 
4622 /*
4623  * This function implements pieces of two sequences from BSpec:
4624  * - Sequence for display software to disable LCPLL
4625  * - Sequence for display software to allow package C8+
4626  * The steps implemented here are just the steps that actually touch the LCPLL
4627  * register. Callers should take care of disabling all the display engine
4628  * functions, doing the mode unset, fixing interrupts, etc.
4629  */
4630 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4631 			      bool switch_to_fclk, bool allow_power_down)
4632 {
4633 	u32 val;
4634 
4635 	assert_can_disable_lcpll(dev_priv);
4636 
4637 	val = intel_de_read(dev_priv, LCPLL_CTL);
4638 
4639 	if (switch_to_fclk) {
4640 		val |= LCPLL_CD_SOURCE_FCLK;
4641 		intel_de_write(dev_priv, LCPLL_CTL, val);
4642 
4643 		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
4644 				LCPLL_CD_SOURCE_FCLK_DONE, 1))
4645 			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
4646 
4647 		val = intel_de_read(dev_priv, LCPLL_CTL);
4648 	}
4649 
4650 	val |= LCPLL_PLL_DISABLE;
4651 	intel_de_write(dev_priv, LCPLL_CTL, val);
4652 	intel_de_posting_read(dev_priv, LCPLL_CTL);
4653 
4654 	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4655 		drm_err(&dev_priv->drm, "LCPLL still locked\n");
4656 
4657 	val = hsw_read_dcomp(dev_priv);
4658 	val |= D_COMP_COMP_DISABLE;
4659 	hsw_write_dcomp(dev_priv, val);
4660 	ndelay(100);
4661 
4662 	if (wait_for((hsw_read_dcomp(dev_priv) &
4663 		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4664 		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
4665 
4666 	if (allow_power_down) {
4667 		val = intel_de_read(dev_priv, LCPLL_CTL);
4668 		val |= LCPLL_POWER_DOWN_ALLOW;
4669 		intel_de_write(dev_priv, LCPLL_CTL, val);
4670 		intel_de_posting_read(dev_priv, LCPLL_CTL);
4671 	}
4672 }
4673 
4674 /*
4675  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4676  * source.
4677  */
4678 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4679 {
4680 	u32 val;
4681 
4682 	val = intel_de_read(dev_priv, LCPLL_CTL);
4683 
4684 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4685 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4686 		return;
4687 
4688 	/*
4689 	 * Make sure we're not on PC8 state before disabling PC8, otherwise
4690 	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4691 	 */
4692 	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4693 
4694 	if (val & LCPLL_POWER_DOWN_ALLOW) {
4695 		val &= ~LCPLL_POWER_DOWN_ALLOW;
4696 		intel_de_write(dev_priv, LCPLL_CTL, val);
4697 		intel_de_posting_read(dev_priv, LCPLL_CTL);
4698 	}
4699 
4700 	val = hsw_read_dcomp(dev_priv);
4701 	val |= D_COMP_COMP_FORCE;
4702 	val &= ~D_COMP_COMP_DISABLE;
4703 	hsw_write_dcomp(dev_priv, val);
4704 
4705 	val = intel_de_read(dev_priv, LCPLL_CTL);
4706 	val &= ~LCPLL_PLL_DISABLE;
4707 	intel_de_write(dev_priv, LCPLL_CTL, val);
4708 
4709 	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4710 		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
4711 
4712 	if (val & LCPLL_CD_SOURCE_FCLK) {
4713 		val = intel_de_read(dev_priv, LCPLL_CTL);
4714 		val &= ~LCPLL_CD_SOURCE_FCLK;
4715 		intel_de_write(dev_priv, LCPLL_CTL, val);
4716 
4717 		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
4718 				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4719 			drm_err(&dev_priv->drm,
4720 				"Switching back to LCPLL failed\n");
4721 	}
4722 
4723 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4724 
4725 	intel_update_cdclk(dev_priv);
4726 	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
4727 }
4728 
4729 /*
4730  * Package states C8 and deeper are really deep PC states that can only be
4731  * reached when all the devices on the system allow it, so even if the graphics
4732  * device allows PC8+, it doesn't mean the system will actually get to these
4733  * states. Our driver only allows PC8+ when going into runtime PM.
4734  *
4735  * The requirements for PC8+ are that all the outputs are disabled, the power
4736  * well is disabled and most interrupts are disabled, and these are also
4737  * requirements for runtime PM. When these conditions are met, we manually do
4738  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4739  * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
4740  * hang the machine.
4741  *
4742  * When we really reach PC8 or deeper states (not just when we allow it) we lose
4743  * the state of some registers, so when we come back from PC8+ we need to
4744  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4745  * need to take care of the registers kept by RC6. Notice that this happens even
4746  * if we don't put the device in PCI D3 state (which is what currently happens
4747  * because of the runtime PM support).
4748  *
4749  * For more, read "Display Sequences for Package C8" on the hardware
4750  * documentation.
4751  */
4752 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4753 {
4754 	u32 val;
4755 
4756 	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
4757 
4758 	if (HAS_PCH_LPT_LP(dev_priv)) {
4759 		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4760 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4761 		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
4762 	}
4763 
4764 	lpt_disable_clkout_dp(dev_priv);
4765 	hsw_disable_lcpll(dev_priv, true, true);
4766 }
4767 
4768 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4769 {
4770 	u32 val;
4771 
4772 	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
4773 
4774 	hsw_restore_lcpll(dev_priv);
4775 	intel_init_pch_refclk(dev_priv);
4776 
4777 	if (HAS_PCH_LPT_LP(dev_priv)) {
4778 		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4779 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4780 		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
4781 	}
4782 }
4783 
4784 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4785 				      bool enable)
4786 {
4787 	i915_reg_t reg;
4788 	u32 reset_bits, val;
4789 
4790 	if (IS_IVYBRIDGE(dev_priv)) {
4791 		reg = GEN7_MSG_CTL;
4792 		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4793 	} else {
4794 		reg = HSW_NDE_RSTWRN_OPT;
4795 		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4796 	}
4797 
4798 	val = intel_de_read(dev_priv, reg);
4799 
4800 	if (enable)
4801 		val |= reset_bits;
4802 	else
4803 		val &= ~reset_bits;
4804 
4805 	intel_de_write(dev_priv, reg, val);
4806 }
4807 
4808 static void skl_display_core_init(struct drm_i915_private *dev_priv,
4809 				  bool resume)
4810 {
4811 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4812 	struct i915_power_well *well;
4813 
4814 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4815 
4816 	/* enable PCH reset handshake */
4817 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4818 
4819 	/* enable PG1 and Misc I/O */
4820 	mutex_lock(&power_domains->lock);
4821 
4822 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4823 	intel_power_well_enable(dev_priv, well);
4824 
4825 	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
4826 	intel_power_well_enable(dev_priv, well);
4827 
4828 	mutex_unlock(&power_domains->lock);
4829 
4830 	intel_cdclk_init_hw(dev_priv);
4831 
4832 	gen9_dbuf_enable(dev_priv);
4833 
4834 	if (resume && dev_priv->csr.dmc_payload)
4835 		intel_csr_load_program(dev_priv);
4836 }
4837 
4838 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
4839 {
4840 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4841 	struct i915_power_well *well;
4842 
4843 	gen9_disable_dc_states(dev_priv);
4844 
4845 	gen9_dbuf_disable(dev_priv);
4846 
4847 	intel_cdclk_uninit_hw(dev_priv);
4848 
4849 	/* The spec doesn't call for removing the reset handshake flag */
4850 	/* disable PG1 and Misc I/O */
4851 
4852 	mutex_lock(&power_domains->lock);
4853 
4854 	/*
4855 	 * BSpec says to keep the MISC IO power well enabled here, only
4856 	 * remove our request for power well 1.
4857 	 * Note that even though the driver's request is removed power well 1
4858 	 * may stay enabled after this due to DMC's own request on it.
4859 	 */
4860 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4861 	intel_power_well_disable(dev_priv, well);
4862 
4863 	mutex_unlock(&power_domains->lock);
4864 
4865 	usleep_range(10, 30);		/* 10 us delay per Bspec */
4866 }
4867 
4868 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4869 {
4870 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4871 	struct i915_power_well *well;
4872 
4873 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4874 
4875 	/*
4876 	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
4877 	 * or else the reset will hang because there is no PCH to respond.
4878 	 * Move the handshake programming to initialization sequence.
4879 	 * Previously was left up to BIOS.
4880 	 */
4881 	intel_pch_reset_handshake(dev_priv, false);
4882 
4883 	/* Enable PG1 */
4884 	mutex_lock(&power_domains->lock);
4885 
4886 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4887 	intel_power_well_enable(dev_priv, well);
4888 
4889 	mutex_unlock(&power_domains->lock);
4890 
4891 	intel_cdclk_init_hw(dev_priv);
4892 
4893 	gen9_dbuf_enable(dev_priv);
4894 
4895 	if (resume && dev_priv->csr.dmc_payload)
4896 		intel_csr_load_program(dev_priv);
4897 }
4898 
4899 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4900 {
4901 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4902 	struct i915_power_well *well;
4903 
4904 	gen9_disable_dc_states(dev_priv);
4905 
4906 	gen9_dbuf_disable(dev_priv);
4907 
4908 	intel_cdclk_uninit_hw(dev_priv);
4909 
4910 	/* The spec doesn't call for removing the reset handshake flag */
4911 
4912 	/*
4913 	 * Disable PW1 (PG1).
4914 	 * Note that even though the driver's request is removed power well 1
4915 	 * may stay enabled after this due to DMC's own request on it.
4916 	 */
4917 	mutex_lock(&power_domains->lock);
4918 
4919 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4920 	intel_power_well_disable(dev_priv, well);
4921 
4922 	mutex_unlock(&power_domains->lock);
4923 
4924 	usleep_range(10, 30);		/* 10 us delay per Bspec */
4925 }
4926 
4927 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4928 {
4929 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4930 	struct i915_power_well *well;
4931 
4932 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4933 
4934 	/* 1. Enable PCH Reset Handshake */
4935 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4936 
4937 	/* 2-3. */
4938 	intel_combo_phy_init(dev_priv);
4939 
4940 	/*
4941 	 * 4. Enable Power Well 1 (PG1).
4942 	 *    The AUX IO power wells will be enabled on demand.
4943 	 */
4944 	mutex_lock(&power_domains->lock);
4945 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4946 	intel_power_well_enable(dev_priv, well);
4947 	mutex_unlock(&power_domains->lock);
4948 
4949 	/* 5. Enable CD clock */
4950 	intel_cdclk_init_hw(dev_priv);
4951 
4952 	/* 6. Enable DBUF */
4953 	gen9_dbuf_enable(dev_priv);
4954 
4955 	if (resume && dev_priv->csr.dmc_payload)
4956 		intel_csr_load_program(dev_priv);
4957 }
4958 
4959 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
4960 {
4961 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4962 	struct i915_power_well *well;
4963 
4964 	gen9_disable_dc_states(dev_priv);
4965 
4966 	/* 1. Disable all display engine functions -> aready done */
4967 
4968 	/* 2. Disable DBUF */
4969 	gen9_dbuf_disable(dev_priv);
4970 
4971 	/* 3. Disable CD clock */
4972 	intel_cdclk_uninit_hw(dev_priv);
4973 
4974 	/*
4975 	 * 4. Disable Power Well 1 (PG1).
4976 	 *    The AUX IO power wells are toggled on demand, so they are already
4977 	 *    disabled at this point.
4978 	 */
4979 	mutex_lock(&power_domains->lock);
4980 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4981 	intel_power_well_disable(dev_priv, well);
4982 	mutex_unlock(&power_domains->lock);
4983 
4984 	usleep_range(10, 30);		/* 10 us delay per Bspec */
4985 
4986 	/* 5. */
4987 	intel_combo_phy_uninit(dev_priv);
4988 }
4989 
4990 struct buddy_page_mask {
4991 	u32 page_mask;
4992 	u8 type;
4993 	u8 num_channels;
4994 };
4995 
4996 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
4997 	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
4998 	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
4999 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
5000 	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
5001 	{}
5002 };
5003 
5004 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
5005 	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5006 	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
5007 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5008 	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
5009 	{}
5010 };
5011 
5012 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5013 {
5014 	enum intel_dram_type type = dev_priv->dram_info.type;
5015 	u8 num_channels = dev_priv->dram_info.num_channels;
5016 	const struct buddy_page_mask *table;
5017 	int i;
5018 
5019 	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
5020 		/* Wa_1409767108: tgl */
5021 		table = wa_1409767108_buddy_page_masks;
5022 	else
5023 		table = tgl_buddy_page_masks;
5024 
5025 	for (i = 0; table[i].page_mask != 0; i++)
5026 		if (table[i].num_channels == num_channels &&
5027 		    table[i].type == type)
5028 			break;
5029 
5030 	if (table[i].page_mask == 0) {
5031 		drm_dbg(&dev_priv->drm,
5032 			"Unknown memory configuration; disabling address buddy logic.\n");
5033 		intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
5034 		intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
5035 	} else {
5036 		intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
5037 			       table[i].page_mask);
5038 		intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
5039 			       table[i].page_mask);
5040 
5041 		/* Wa_22010178259:tgl */
5042 		intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
5043 			     BW_BUDDY_TLB_REQ_TIMER_MASK,
5044 			     REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
5045 		intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
5046 			     BW_BUDDY_TLB_REQ_TIMER_MASK,
5047 			     REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
5048 	}
5049 }
5050 
5051 static void icl_display_core_init(struct drm_i915_private *dev_priv,
5052 				  bool resume)
5053 {
5054 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5055 	struct i915_power_well *well;
5056 
5057 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5058 
5059 	/* 1. Enable PCH reset handshake. */
5060 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5061 
5062 	/* 2. Initialize all combo phys */
5063 	intel_combo_phy_init(dev_priv);
5064 
5065 	/*
5066 	 * 3. Enable Power Well 1 (PG1).
5067 	 *    The AUX IO power wells will be enabled on demand.
5068 	 */
5069 	mutex_lock(&power_domains->lock);
5070 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5071 	intel_power_well_enable(dev_priv, well);
5072 	mutex_unlock(&power_domains->lock);
5073 
5074 	/* 4. Enable CDCLK. */
5075 	intel_cdclk_init_hw(dev_priv);
5076 
5077 	/* 5. Enable DBUF. */
5078 	icl_dbuf_enable(dev_priv);
5079 
5080 	/* 6. Setup MBUS. */
5081 	icl_mbus_init(dev_priv);
5082 
5083 	/* 7. Program arbiter BW_BUDDY registers */
5084 	if (INTEL_GEN(dev_priv) >= 12)
5085 		tgl_bw_buddy_init(dev_priv);
5086 
5087 	if (resume && dev_priv->csr.dmc_payload)
5088 		intel_csr_load_program(dev_priv);
5089 }
5090 
5091 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5092 {
5093 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5094 	struct i915_power_well *well;
5095 
5096 	gen9_disable_dc_states(dev_priv);
5097 
5098 	/* 1. Disable all display engine functions -> aready done */
5099 
5100 	/* 2. Disable DBUF */
5101 	icl_dbuf_disable(dev_priv);
5102 
5103 	/* 3. Disable CD clock */
5104 	intel_cdclk_uninit_hw(dev_priv);
5105 
5106 	/*
5107 	 * 4. Disable Power Well 1 (PG1).
5108 	 *    The AUX IO power wells are toggled on demand, so they are already
5109 	 *    disabled at this point.
5110 	 */
5111 	mutex_lock(&power_domains->lock);
5112 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5113 	intel_power_well_disable(dev_priv, well);
5114 	mutex_unlock(&power_domains->lock);
5115 
5116 	/* 5. */
5117 	intel_combo_phy_uninit(dev_priv);
5118 }
5119 
5120 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5121 {
5122 	struct i915_power_well *cmn_bc =
5123 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5124 	struct i915_power_well *cmn_d =
5125 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5126 
5127 	/*
5128 	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5129 	 * workaround never ever read DISPLAY_PHY_CONTROL, and
5130 	 * instead maintain a shadow copy ourselves. Use the actual
5131 	 * power well state and lane status to reconstruct the
5132 	 * expected initial value.
5133 	 */
5134 	dev_priv->chv_phy_control =
5135 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5136 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5137 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5138 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5139 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5140 
5141 	/*
5142 	 * If all lanes are disabled we leave the override disabled
5143 	 * with all power down bits cleared to match the state we
5144 	 * would use after disabling the port. Otherwise enable the
5145 	 * override and set the lane powerdown bits accding to the
5146 	 * current lane status.
5147 	 */
5148 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5149 		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
5150 		unsigned int mask;
5151 
5152 		mask = status & DPLL_PORTB_READY_MASK;
5153 		if (mask == 0xf)
5154 			mask = 0x0;
5155 		else
5156 			dev_priv->chv_phy_control |=
5157 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5158 
5159 		dev_priv->chv_phy_control |=
5160 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5161 
5162 		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5163 		if (mask == 0xf)
5164 			mask = 0x0;
5165 		else
5166 			dev_priv->chv_phy_control |=
5167 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5168 
5169 		dev_priv->chv_phy_control |=
5170 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5171 
5172 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5173 
5174 		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5175 	} else {
5176 		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5177 	}
5178 
5179 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5180 		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
5181 		unsigned int mask;
5182 
5183 		mask = status & DPLL_PORTD_READY_MASK;
5184 
5185 		if (mask == 0xf)
5186 			mask = 0x0;
5187 		else
5188 			dev_priv->chv_phy_control |=
5189 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5190 
5191 		dev_priv->chv_phy_control |=
5192 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
5193 
5194 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
5195 
5196 		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
5197 	} else {
5198 		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
5199 	}
5200 
5201 	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
5202 		    dev_priv->chv_phy_control);
5203 
5204 	/* Defer application of initial phy_control to enabling the powerwell */
5205 }
5206 
5207 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5208 {
5209 	struct i915_power_well *cmn =
5210 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5211 	struct i915_power_well *disp2d =
5212 		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5213 
5214 	/* If the display might be already active skip this */
5215 	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5216 	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5217 	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
5218 		return;
5219 
5220 	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
5221 
5222 	/* cmnlane needs DPLL registers */
5223 	disp2d->desc->ops->enable(dev_priv, disp2d);
5224 
5225 	/*
5226 	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5227 	 * Need to assert and de-assert PHY SB reset by gating the
5228 	 * common lane power, then un-gating it.
5229 	 * Simply ungating isn't enough to reset the PHY enough to get
5230 	 * ports and lanes running.
5231 	 */
5232 	cmn->desc->ops->disable(dev_priv, cmn);
5233 }
5234 
5235 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5236 {
5237 	bool ret;
5238 
5239 	vlv_punit_get(dev_priv);
5240 	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
5241 	vlv_punit_put(dev_priv);
5242 
5243 	return ret;
5244 }
5245 
5246 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
5247 {
5248 	drm_WARN(&dev_priv->drm,
5249 		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
5250 		 "VED not power gated\n");
5251 }
5252 
5253 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
5254 {
5255 	static const struct pci_device_id isp_ids[] = {
5256 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
5257 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
5258 		{}
5259 	};
5260 
5261 	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
5262 		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
5263 		 "ISP not power gated\n");
5264 }
5265 
5266 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
5267 
5268 /**
5269  * intel_power_domains_init_hw - initialize hardware power domain state
5270  * @i915: i915 device instance
5271  * @resume: Called from resume code paths or not
5272  *
5273  * This function initializes the hardware power domain state and enables all
5274  * power wells belonging to the INIT power domain. Power wells in other
5275  * domains (and not in the INIT domain) are referenced or disabled by
5276  * intel_modeset_readout_hw_state(). After that the reference count of each
5277  * power well must match its HW enabled state, see
5278  * intel_power_domains_verify_state().
5279  *
5280  * It will return with power domains disabled (to be enabled later by
5281  * intel_power_domains_enable()) and must be paired with
5282  * intel_power_domains_driver_remove().
5283  */
5284 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
5285 {
5286 	struct i915_power_domains *power_domains = &i915->power_domains;
5287 
5288 	power_domains->initializing = true;
5289 
5290 	if (INTEL_GEN(i915) >= 11) {
5291 		icl_display_core_init(i915, resume);
5292 	} else if (IS_CANNONLAKE(i915)) {
5293 		cnl_display_core_init(i915, resume);
5294 	} else if (IS_GEN9_BC(i915)) {
5295 		skl_display_core_init(i915, resume);
5296 	} else if (IS_GEN9_LP(i915)) {
5297 		bxt_display_core_init(i915, resume);
5298 	} else if (IS_CHERRYVIEW(i915)) {
5299 		mutex_lock(&power_domains->lock);
5300 		chv_phy_control_init(i915);
5301 		mutex_unlock(&power_domains->lock);
5302 		assert_isp_power_gated(i915);
5303 	} else if (IS_VALLEYVIEW(i915)) {
5304 		mutex_lock(&power_domains->lock);
5305 		vlv_cmnlane_wa(i915);
5306 		mutex_unlock(&power_domains->lock);
5307 		assert_ved_power_gated(i915);
5308 		assert_isp_power_gated(i915);
5309 	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
5310 		hsw_assert_cdclk(i915);
5311 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5312 	} else if (IS_IVYBRIDGE(i915)) {
5313 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5314 	}
5315 
5316 	/*
5317 	 * Keep all power wells enabled for any dependent HW access during
5318 	 * initialization and to make sure we keep BIOS enabled display HW
5319 	 * resources powered until display HW readout is complete. We drop
5320 	 * this reference in intel_power_domains_enable().
5321 	 */
5322 	power_domains->wakeref =
5323 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5324 
5325 	/* Disable power support if the user asked so. */
5326 	if (!i915_modparams.disable_power_well)
5327 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5328 	intel_power_domains_sync_hw(i915);
5329 
5330 	power_domains->initializing = false;
5331 }
5332 
5333 /**
5334  * intel_power_domains_driver_remove - deinitialize hw power domain state
5335  * @i915: i915 device instance
5336  *
5337  * De-initializes the display power domain HW state. It also ensures that the
5338  * device stays powered up so that the driver can be reloaded.
5339  *
5340  * It must be called with power domains already disabled (after a call to
5341  * intel_power_domains_disable()) and must be paired with
5342  * intel_power_domains_init_hw().
5343  */
5344 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
5345 {
5346 	intel_wakeref_t wakeref __maybe_unused =
5347 		fetch_and_zero(&i915->power_domains.wakeref);
5348 
5349 	/* Remove the refcount we took to keep power well support disabled. */
5350 	if (!i915_modparams.disable_power_well)
5351 		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5352 
5353 	intel_display_power_flush_work_sync(i915);
5354 
5355 	intel_power_domains_verify_state(i915);
5356 
5357 	/* Keep the power well enabled, but cancel its rpm wakeref. */
5358 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5359 }
5360 
5361 /**
5362  * intel_power_domains_enable - enable toggling of display power wells
5363  * @i915: i915 device instance
5364  *
5365  * Enable the ondemand enabling/disabling of the display power wells. Note that
5366  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
5367  * only at specific points of the display modeset sequence, thus they are not
5368  * affected by the intel_power_domains_enable()/disable() calls. The purpose
5369  * of these function is to keep the rest of power wells enabled until the end
5370  * of display HW readout (which will acquire the power references reflecting
5371  * the current HW state).
5372  */
5373 void intel_power_domains_enable(struct drm_i915_private *i915)
5374 {
5375 	intel_wakeref_t wakeref __maybe_unused =
5376 		fetch_and_zero(&i915->power_domains.wakeref);
5377 
5378 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5379 	intel_power_domains_verify_state(i915);
5380 }
5381 
5382 /**
5383  * intel_power_domains_disable - disable toggling of display power wells
5384  * @i915: i915 device instance
5385  *
5386  * Disable the ondemand enabling/disabling of the display power wells. See
5387  * intel_power_domains_enable() for which power wells this call controls.
5388  */
5389 void intel_power_domains_disable(struct drm_i915_private *i915)
5390 {
5391 	struct i915_power_domains *power_domains = &i915->power_domains;
5392 
5393 	drm_WARN_ON(&i915->drm, power_domains->wakeref);
5394 	power_domains->wakeref =
5395 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5396 
5397 	intel_power_domains_verify_state(i915);
5398 }
5399 
5400 /**
5401  * intel_power_domains_suspend - suspend power domain state
5402  * @i915: i915 device instance
5403  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5404  *
5405  * This function prepares the hardware power domain state before entering
5406  * system suspend.
5407  *
5408  * It must be called with power domains already disabled (after a call to
5409  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
5410  */
5411 void intel_power_domains_suspend(struct drm_i915_private *i915,
5412 				 enum i915_drm_suspend_mode suspend_mode)
5413 {
5414 	struct i915_power_domains *power_domains = &i915->power_domains;
5415 	intel_wakeref_t wakeref __maybe_unused =
5416 		fetch_and_zero(&power_domains->wakeref);
5417 
5418 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5419 
5420 	/*
5421 	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
5422 	 * support don't manually deinit the power domains. This also means the
5423 	 * CSR/DMC firmware will stay active, it will power down any HW
5424 	 * resources as required and also enable deeper system power states
5425 	 * that would be blocked if the firmware was inactive.
5426 	 */
5427 	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
5428 	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
5429 	    i915->csr.dmc_payload) {
5430 		intel_display_power_flush_work(i915);
5431 		intel_power_domains_verify_state(i915);
5432 		return;
5433 	}
5434 
5435 	/*
5436 	 * Even if power well support was disabled we still want to disable
5437 	 * power wells if power domains must be deinitialized for suspend.
5438 	 */
5439 	if (!i915_modparams.disable_power_well)
5440 		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5441 
5442 	intel_display_power_flush_work(i915);
5443 	intel_power_domains_verify_state(i915);
5444 
5445 	if (INTEL_GEN(i915) >= 11)
5446 		icl_display_core_uninit(i915);
5447 	else if (IS_CANNONLAKE(i915))
5448 		cnl_display_core_uninit(i915);
5449 	else if (IS_GEN9_BC(i915))
5450 		skl_display_core_uninit(i915);
5451 	else if (IS_GEN9_LP(i915))
5452 		bxt_display_core_uninit(i915);
5453 
5454 	power_domains->display_core_suspended = true;
5455 }
5456 
5457 /**
5458  * intel_power_domains_resume - resume power domain state
5459  * @i915: i915 device instance
5460  *
5461  * This function resume the hardware power domain state during system resume.
5462  *
5463  * It will return with power domain support disabled (to be enabled later by
5464  * intel_power_domains_enable()) and must be paired with
5465  * intel_power_domains_suspend().
5466  */
5467 void intel_power_domains_resume(struct drm_i915_private *i915)
5468 {
5469 	struct i915_power_domains *power_domains = &i915->power_domains;
5470 
5471 	if (power_domains->display_core_suspended) {
5472 		intel_power_domains_init_hw(i915, true);
5473 		power_domains->display_core_suspended = false;
5474 	} else {
5475 		drm_WARN_ON(&i915->drm, power_domains->wakeref);
5476 		power_domains->wakeref =
5477 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
5478 	}
5479 
5480 	intel_power_domains_verify_state(i915);
5481 }
5482 
5483 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5484 
5485 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5486 {
5487 	struct i915_power_domains *power_domains = &i915->power_domains;
5488 	struct i915_power_well *power_well;
5489 
5490 	for_each_power_well(i915, power_well) {
5491 		enum intel_display_power_domain domain;
5492 
5493 		drm_dbg(&i915->drm, "%-25s %d\n",
5494 			power_well->desc->name, power_well->count);
5495 
5496 		for_each_power_domain(domain, power_well->desc->domains)
5497 			drm_dbg(&i915->drm, "  %-23s %d\n",
5498 				intel_display_power_domain_str(domain),
5499 				power_domains->domain_use_count[domain]);
5500 	}
5501 }
5502 
5503 /**
5504  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5505  * @i915: i915 device instance
5506  *
5507  * Verify if the reference count of each power well matches its HW enabled
5508  * state and the total refcount of the domains it belongs to. This must be
5509  * called after modeset HW state sanitization, which is responsible for
5510  * acquiring reference counts for any power wells in use and disabling the
5511  * ones left on by BIOS but not required by any active output.
5512  */
5513 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5514 {
5515 	struct i915_power_domains *power_domains = &i915->power_domains;
5516 	struct i915_power_well *power_well;
5517 	bool dump_domain_info;
5518 
5519 	mutex_lock(&power_domains->lock);
5520 
5521 	verify_async_put_domains_state(power_domains);
5522 
5523 	dump_domain_info = false;
5524 	for_each_power_well(i915, power_well) {
5525 		enum intel_display_power_domain domain;
5526 		int domains_count;
5527 		bool enabled;
5528 
5529 		enabled = power_well->desc->ops->is_enabled(i915, power_well);
5530 		if ((power_well->count || power_well->desc->always_on) !=
5531 		    enabled)
5532 			drm_err(&i915->drm,
5533 				"power well %s state mismatch (refcount %d/enabled %d)",
5534 				power_well->desc->name,
5535 				power_well->count, enabled);
5536 
5537 		domains_count = 0;
5538 		for_each_power_domain(domain, power_well->desc->domains)
5539 			domains_count += power_domains->domain_use_count[domain];
5540 
5541 		if (power_well->count != domains_count) {
5542 			drm_err(&i915->drm,
5543 				"power well %s refcount/domain refcount mismatch "
5544 				"(refcount %d/domains refcount %d)\n",
5545 				power_well->desc->name, power_well->count,
5546 				domains_count);
5547 			dump_domain_info = true;
5548 		}
5549 	}
5550 
5551 	if (dump_domain_info) {
5552 		static bool dumped;
5553 
5554 		if (!dumped) {
5555 			intel_power_domains_dump_info(i915);
5556 			dumped = true;
5557 		}
5558 	}
5559 
5560 	mutex_unlock(&power_domains->lock);
5561 }
5562 
5563 #else
5564 
5565 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5566 {
5567 }
5568 
5569 #endif
5570 
5571 void intel_display_power_suspend_late(struct drm_i915_private *i915)
5572 {
5573 	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
5574 		bxt_enable_dc9(i915);
5575 	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
5576 		hsw_enable_pc8(i915);
5577 }
5578 
5579 void intel_display_power_resume_early(struct drm_i915_private *i915)
5580 {
5581 	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
5582 		gen9_sanitize_dc_state(i915);
5583 		bxt_disable_dc9(i915);
5584 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5585 		hsw_disable_pc8(i915);
5586 	}
5587 }
5588 
5589 void intel_display_power_suspend(struct drm_i915_private *i915)
5590 {
5591 	if (INTEL_GEN(i915) >= 11) {
5592 		icl_display_core_uninit(i915);
5593 		bxt_enable_dc9(i915);
5594 	} else if (IS_GEN9_LP(i915)) {
5595 		bxt_display_core_uninit(i915);
5596 		bxt_enable_dc9(i915);
5597 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5598 		hsw_enable_pc8(i915);
5599 	}
5600 }
5601 
5602 void intel_display_power_resume(struct drm_i915_private *i915)
5603 {
5604 	if (INTEL_GEN(i915) >= 11) {
5605 		bxt_disable_dc9(i915);
5606 		icl_display_core_init(i915, true);
5607 		if (i915->csr.dmc_payload) {
5608 			if (i915->csr.allowed_dc_mask &
5609 			    DC_STATE_EN_UPTO_DC6)
5610 				skl_enable_dc6(i915);
5611 			else if (i915->csr.allowed_dc_mask &
5612 				 DC_STATE_EN_UPTO_DC5)
5613 				gen9_enable_dc5(i915);
5614 		}
5615 	} else if (IS_GEN9_LP(i915)) {
5616 		bxt_disable_dc9(i915);
5617 		bxt_display_core_init(i915, true);
5618 		if (i915->csr.dmc_payload &&
5619 		    (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5620 			gen9_enable_dc5(i915);
5621 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5622 		hsw_disable_pc8(i915);
5623 	}
5624 }
5625