1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <linux/vgaarb.h>
7 
8 #include "display/intel_crt.h"
9 #include "display/intel_dp.h"
10 
11 #include "i915_drv.h"
12 #include "i915_irq.h"
13 #include "intel_cdclk.h"
14 #include "intel_combo_phy.h"
15 #include "intel_csr.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_drv.h"
18 #include "intel_hotplug.h"
19 #include "intel_sideband.h"
20 #include "intel_tc.h"
21 
22 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
23 					 enum i915_power_well_id power_well_id);
24 
/**
 * intel_display_power_domain_str - human readable name for a power domain
 * @i915: i915 device instance
 * @domain: power domain to name
 *
 * Returns a static string identifying @domain for debug output. On gen12
 * the DDI D/E/F and AUX D/E/F domain values alias the TC1/TC2/TC3 domains
 * (enforced by the BUILD_BUG_ONs below), so on those platforms the TC
 * names are reported instead of the legacy port names.
 */
const char *
intel_display_power_domain_str(struct drm_i915_private *i915,
			       enum intel_display_power_domain domain)
{
	/* Gen12 exposes ports D/E/F as Type-C ports TC1/TC2/TC3. */
	bool ddi_tc_ports = IS_GEN(i915, 12);

	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		/* The TC naming below relies on these enum aliases. */
		BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES !=
			     POWER_DOMAIN_PORT_DDI_TC1_LANES);
		return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES !=
			     POWER_DOMAIN_PORT_DDI_TC2_LANES);
		return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES !=
			     POWER_DOMAIN_PORT_DDI_TC3_LANES);
		return ddi_tc_ports ? "PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_TC4_LANES:
		return "PORT_DDI_TC4_LANES";
	case POWER_DOMAIN_PORT_DDI_TC5_LANES:
		return "PORT_DDI_TC5_LANES";
	case POWER_DOMAIN_PORT_DDI_TC6_LANES:
		return "PORT_DDI_TC6_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO !=
			     POWER_DOMAIN_PORT_DDI_TC1_IO);
		return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO !=
			     POWER_DOMAIN_PORT_DDI_TC2_IO);
		return ddi_tc_ports ? "PORT_DDI_TC2_IO" : "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO !=
			     POWER_DOMAIN_PORT_DDI_TC3_IO);
		return ddi_tc_ports ? "PORT_DDI_TC3_IO" : "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_TC4_IO:
		return "PORT_DDI_TC4_IO";
	case POWER_DOMAIN_PORT_DDI_TC5_IO:
		return "PORT_DDI_TC5_IO";
	case POWER_DOMAIN_PORT_DDI_TC6_IO:
		return "PORT_DDI_TC6_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1);
		return ddi_tc_ports ? "AUX_TC1" : "AUX_D";
	case POWER_DOMAIN_AUX_E:
		BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2);
		return ddi_tc_ports ? "AUX_TC2" : "AUX_E";
	case POWER_DOMAIN_AUX_F:
		BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3);
		return ddi_tc_ports ? "AUX_TC3" : "AUX_F";
	case POWER_DOMAIN_AUX_TC4:
		return "AUX_TC4";
	case POWER_DOMAIN_AUX_TC5:
		return "AUX_TC5";
	case POWER_DOMAIN_AUX_TC6:
		return "AUX_TC6";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_AUX_TBT5:
		return "AUX_TBT5";
	case POWER_DOMAIN_AUX_TBT6:
		return "AUX_TBT6";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DPLL_DC_OFF:
		return "DPLL_DC_OFF";
	default:
		/* Unknown domain: log once and fall back to a placeholder. */
		MISSING_CASE(domain);
		return "?";
	}
}
174 
175 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
176 				    struct i915_power_well *power_well)
177 {
178 	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
179 	power_well->desc->ops->enable(dev_priv, power_well);
180 	power_well->hw_enabled = true;
181 }
182 
183 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
184 				     struct i915_power_well *power_well)
185 {
186 	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
187 	power_well->hw_enabled = false;
188 	power_well->desc->ops->disable(dev_priv, power_well);
189 }
190 
191 static void intel_power_well_get(struct drm_i915_private *dev_priv,
192 				 struct i915_power_well *power_well)
193 {
194 	if (!power_well->count++)
195 		intel_power_well_enable(dev_priv, power_well);
196 }
197 
198 static void intel_power_well_put(struct drm_i915_private *dev_priv,
199 				 struct i915_power_well *power_well)
200 {
201 	WARN(!power_well->count, "Use count on power well %s is already zero",
202 	     power_well->desc->name);
203 
204 	if (!--power_well->count)
205 		intel_power_well_disable(dev_priv, power_well);
206 }
207 
208 /**
209  * __intel_display_power_is_enabled - unlocked check for a power domain
210  * @dev_priv: i915 device instance
211  * @domain: power domain to check
212  *
213  * This is the unlocked version of intel_display_power_is_enabled() and should
214  * only be used from error capture and recovery code where deadlocks are
215  * possible.
216  *
217  * Returns:
218  * True when the power domain is enabled, false otherwise.
219  */
220 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
221 				      enum intel_display_power_domain domain)
222 {
223 	struct i915_power_well *power_well;
224 	bool is_enabled;
225 
226 	if (dev_priv->runtime_pm.suspended)
227 		return false;
228 
229 	is_enabled = true;
230 
231 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
232 		if (power_well->desc->always_on)
233 			continue;
234 
235 		if (!power_well->hw_enabled) {
236 			is_enabled = false;
237 			break;
238 		}
239 	}
240 
241 	return is_enabled;
242 }
243 
244 /**
245  * intel_display_power_is_enabled - check for a power domain
246  * @dev_priv: i915 device instance
247  * @domain: power domain to check
248  *
249  * This function can be used to check the hw power domain state. It is mostly
250  * used in hardware state readout functions. Everywhere else code should rely
251  * upon explicit power domain reference counting to ensure that the hardware
252  * block is powered up before accessing it.
253  *
254  * Callers must hold the relevant modesetting locks to ensure that concurrent
255  * threads can't disable the power well while the caller tries to read a few
256  * registers.
257  *
258  * Returns:
259  * True when the power domain is enabled, false otherwise.
260  */
261 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
262 				    enum intel_display_power_domain domain)
263 {
264 	struct i915_power_domains *power_domains;
265 	bool ret;
266 
267 	power_domains = &dev_priv->power_domains;
268 
269 	mutex_lock(&power_domains->lock);
270 	ret = __intel_display_power_is_enabled(dev_priv, domain);
271 	mutex_unlock(&power_domains->lock);
272 
273 	return ret;
274 }
275 
276 /*
277  * Starting with Haswell, we have a "Power Down Well" that can be turned off
278  * when not needed anymore. We have 4 registers that can request the power well
279  * to be enabled, and it will only be disabled if none of the registers is
280  * requesting it to be enabled.
281  */
/*
 * Common post-enable steps for HSW+ power wells: apply the VGA MSR
 * workaround if the well feeds the VGA block and re-enable the pipe
 * interrupts that were masked while the well was down.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure here that we touch the VGA MSR
	 * register, making sure vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	if (has_vga) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
306 
307 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
308 				       u8 irq_pipe_mask)
309 {
310 	if (irq_pipe_mask)
311 		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
312 }
313 
/*
 * Poll the driver control register until HW reports the well's state bit
 * set. A timeout is only unexpected for non-TBT wells: per the inline
 * comment below, TBT AUX wells legitimately time out when the TBT DP
 * tunnel is down.
 */
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_wait_for_register(&dev_priv->uncore,
				    regs->driver,
				    HSW_PWR_WELL_CTL_STATE(pw_idx),
				    HSW_PWR_WELL_CTL_STATE(pw_idx),
				    1)) {
		DRM_DEBUG_KMS("%s power well enable timeout\n",
			      power_well->desc->name);

		/* An AUX timeout is expected if the TBT DP tunnel is down. */
		WARN_ON(!power_well->desc->hsw.is_tc_tbt);
	}
}
333 
334 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
335 				     const struct i915_power_well_regs *regs,
336 				     int pw_idx)
337 {
338 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
339 	u32 ret;
340 
341 	ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
342 	ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
343 	if (regs->kvmr.reg)
344 		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
345 	ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
346 
347 	return ret;
348 }
349 
/*
 * Best-effort wait for the well's state bit to clear after dropping our
 * request. Note the wait_for() condition below has side effects: it
 * refreshes @disabled and @reqs on every poll iteration, so on exit they
 * reflect the final HW state.
 */
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(I915_READ(regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	/* Well still on: report which agents are keeping it enabled. */
	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->desc->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
377 
/* Wait for the fuse distribution of power gate @pg to complete. */
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
					SKL_FUSE_PG_DIST_STATUS(pg),
					SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
386 
/*
 * Generic HSW+ power well enable sequence: optionally wait for the
 * prerequisite PG0 fuses (PW1 only), set the driver request bit, wait for
 * the HW state bit, apply the CNL AUX workaround where needed, wait for
 * this well's own fuses, then run the common post-enable steps.
 */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool wait_fuses = power_well->desc->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		/* The PW index -> PG mapping differs on gen11+. */
		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}
430 
431 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
432 				   struct i915_power_well *power_well)
433 {
434 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
435 	int pw_idx = power_well->desc->hsw.idx;
436 	u32 val;
437 
438 	hsw_power_well_pre_disable(dev_priv,
439 				   power_well->desc->hsw.irq_pipe_mask);
440 
441 	val = I915_READ(regs->driver);
442 	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
443 	hsw_wait_for_power_well_disable(dev_priv, power_well);
444 }
445 
446 #define ICL_AUX_PW_TO_PHY(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
447 
/*
 * Enable an ICL+ combo PHY AUX power well: request the well, enable the
 * AUX lanes on pre-gen12 hardware, wait for the well, then apply Display
 * WA #1178 on the applicable AUX indices for non-eDP ports.
 */
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;
	int wa_idx_max;

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	/* Pre-gen12 also needs the AUX lane enabled in the port CL block. */
	if (INTEL_GEN(dev_priv) < 12) {
		val = I915_READ(ICL_PORT_CL_DW12(phy));
		I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: icl, tgl */
	if (IS_TIGERLAKE(dev_priv))
		wa_idx_max = ICL_PW_CTL_IDX_AUX_C;
	else
		wa_idx_max = ICL_PW_CTL_IDX_AUX_B;

	if (!IS_ELKHARTLAKE(dev_priv) &&
	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}
482 
/*
 * Disable an ICL+ combo PHY AUX power well: disable the AUX lanes first on
 * pre-gen12 hardware, then drop the request bit and wait for the well to
 * power down (the reverse of the enable sequence).
 */
static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	if (INTEL_GEN(dev_priv) < 12) {
		val = I915_READ(ICL_PORT_CL_DW12(phy));
		I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
	}

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
502 
503 #define ICL_AUX_PW_TO_CH(pw_idx)	\
504 	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
505 
506 #define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
507 	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
508 
509 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
510 				     struct i915_power_well *power_well)
511 {
512 	int pw_idx = power_well->desc->hsw.idx;
513 
514 	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
515 						 ICL_AUX_PW_TO_CH(pw_idx);
516 }
517 
518 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
519 
520 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
521 
522 static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
523 				      struct i915_power_well *power_well)
524 {
525 	int refs = hweight64(power_well->desc->domains &
526 			     async_put_domains_mask(&dev_priv->power_domains));
527 
528 	WARN_ON(refs > power_well->count);
529 
530 	return refs;
531 }
532 
533 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
534 					struct i915_power_well *power_well)
535 {
536 	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
537 	struct intel_digital_port *dig_port = NULL;
538 	struct intel_encoder *encoder;
539 
540 	/* Bypass the check if all references are released asynchronously */
541 	if (power_well_async_ref_count(dev_priv, power_well) ==
542 	    power_well->count)
543 		return;
544 
545 	aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
546 
547 	for_each_intel_encoder(&dev_priv->drm, encoder) {
548 		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
549 
550 		if (!intel_phy_is_tc(dev_priv, phy))
551 			continue;
552 
553 		/* We'll check the MST primary port */
554 		if (encoder->type == INTEL_OUTPUT_DP_MST)
555 			continue;
556 
557 		dig_port = enc_to_dig_port(&encoder->base);
558 		if (WARN_ON(!dig_port))
559 			continue;
560 
561 		if (dig_port->aux_ch != aux_ch) {
562 			dig_port = NULL;
563 			continue;
564 		}
565 
566 		break;
567 	}
568 
569 	if (WARN_ON(!dig_port))
570 		return;
571 
572 	WARN_ON(!intel_tc_port_ref_held(dig_port));
573 }
574 
575 #else
576 
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	/* No-op without CONFIG_DRM_I915_DEBUG_RUNTIME_PM. */
}
581 
582 #endif
583 
584 static void
585 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
586 				 struct i915_power_well *power_well)
587 {
588 	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
589 	u32 val;
590 
591 	icl_tc_port_assert_ref_held(dev_priv, power_well);
592 
593 	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
594 	val &= ~DP_AUX_CH_CTL_TBT_IO;
595 	if (power_well->desc->hsw.is_tc_tbt)
596 		val |= DP_AUX_CH_CTL_TBT_IO;
597 	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
598 
599 	hsw_power_well_enable(dev_priv, power_well);
600 }
601 
/*
 * Disable a TC PHY AUX power well via the common HSW+ sequence, asserting
 * first that the TC port reference is held (debug builds only).
 */
static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	icl_tc_port_assert_ref_held(dev_priv, power_well);

	hsw_power_well_disable(dev_priv, power_well);
}
610 
611 /*
612  * We should only use the power well if we explicitly asked the hardware to
613  * enable it, so check if it's enabled and also check if we've requested it to
614  * be enabled.
615  */
/*
 * A well counts as enabled only when both our request bit and the HW
 * state bit are set in the driver control register (with the GEN9 big
 * core DMC quirk below folding in the BIOS request bits for PW1/MISC_IO).
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will be not restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= I915_READ(regs->bios);

	return (val & mask) == mask;
}
640 
/* Sanity-check the preconditions for entering the DC9 power state. */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
661 
/* Sanity-check the preconditions for leaving the DC9 power state. */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
677 
/*
 * Write @state to DC_STATE_EN, rewriting until it sticks: the DMC has
 * been observed to revert the register, so keep re-reading (up to 5
 * consecutive stable reads) and re-writing (up to 100 times) before
 * giving up with an error.
 */
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do  {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			/* Stable for enough consecutive reads: done. */
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}
714 
715 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
716 {
717 	u32 mask;
718 
719 	mask = DC_STATE_EN_UPTO_DC5;
720 	if (INTEL_GEN(dev_priv) >= 11)
721 		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
722 	else if (IS_GEN9_LP(dev_priv))
723 		mask |= DC_STATE_EN_DC9;
724 	else
725 		mask |= DC_STATE_EN_UPTO_DC6;
726 
727 	return mask;
728 }
729 
730 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
731 {
732 	u32 val;
733 
734 	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
735 
736 	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
737 		      dev_priv->csr.dc_state, val);
738 	dev_priv->csr.dc_state = val;
739 }
740 
741 /**
742  * gen9_set_dc_state - set target display C power state
743  * @dev_priv: i915 device instance
744  * @state: target DC power state
745  * - DC_STATE_DISABLE
746  * - DC_STATE_EN_UPTO_DC5
747  * - DC_STATE_EN_UPTO_DC6
748  * - DC_STATE_EN_DC9
749  *
750  * Signal to DMC firmware/HW the target DC power state passed in @state.
751  * DMC/HW can turn off individual display clocks and power rails when entering
752  * a deeper DC power state (higher in number) and turns these back when exiting
753  * that state to a shallower power state (lower in number). The HW will decide
754  * when to actually enter a given state on an on-demand basis, for instance
755  * depending on the active state of display pipes. The state of display
756  * registers backed by affected power rails are saved/restored as needed.
757  *
758  * Based on the above enabling a deeper DC power state is asynchronous wrt.
759  * enabling it. Disabling a deeper power state is synchronous: for instance
760  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
761  * back on and register state is restored. This is guaranteed by the MMIO write
762  * to DC_STATE_EN blocking until the state is restored.
763  */
764 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
765 {
766 	u32 val;
767 	u32 mask;
768 
769 	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
770 		state &= dev_priv->csr.allowed_dc_mask;
771 
772 	val = I915_READ(DC_STATE_EN);
773 	mask = gen9_dc_mask(dev_priv);
774 	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
775 		      val & mask, state);
776 
777 	/* Check if DMC is ignoring our DC state requests */
778 	if ((val & mask) != dev_priv->csr.dc_state)
779 		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
780 			  dev_priv->csr.dc_state, val & mask);
781 
782 	val &= ~mask;
783 	val |= state;
784 
785 	gen9_write_dc_state(dev_priv, val);
786 
787 	dev_priv->csr.dc_state = val & mask;
788 }
789 
/* Enter the DC9 power state, resetting the PPS first where needed. */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
804 
/* Leave the DC9 power state and re-apply the PPS register workaround. */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
815 
/* Warn if the CSR/DMC firmware doesn't appear to be loaded into HW. */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
823 
824 static struct i915_power_well *
825 lookup_power_well(struct drm_i915_private *dev_priv,
826 		  enum i915_power_well_id power_well_id)
827 {
828 	struct i915_power_well *power_well;
829 
830 	for_each_power_well(dev_priv, power_well)
831 		if (power_well->desc->id == power_well_id)
832 			return power_well;
833 
834 	/*
835 	 * It's not feasible to add error checking code to the callers since
836 	 * this condition really shouldn't happen and it doesn't even make sense
837 	 * to abort things like display initialization sequences. Just return
838 	 * the first power well and hope the WARN gets reported so we can fix
839 	 * our driver.
840 	 */
841 	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
842 	return &dev_priv->power_domains.power_wells[0];
843 }
844 
/* Sanity-check the preconditions for enabling the DC5 power state. */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	/* PG2 must be off before DC5 can be entered. */
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_csr_loaded(dev_priv);
}
858 
/* Allow the HW to enter the DC5 power state. */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
872 
/* Sanity-check the preconditions for enabling the DC6 power state. */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
882 
/* Allow the HW to enter the DC6 power state. */
void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
896 
897 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
898 				   struct i915_power_well *power_well)
899 {
900 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
901 	int pw_idx = power_well->desc->hsw.idx;
902 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
903 	u32 bios_req = I915_READ(regs->bios);
904 
905 	/* Take over the request bit if set by BIOS. */
906 	if (bios_req & mask) {
907 		u32 drv_req = I915_READ(regs->driver);
908 
909 		if (!(drv_req & mask))
910 			I915_WRITE(regs->driver, drv_req | mask);
911 		I915_WRITE(regs->bios, bios_req & ~mask);
912 	}
913 }
914 
/* BXT DPIO common wells are controlled by (un)initializing the DDI PHY. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}
920 
/* Counterpart of bxt_dpio_cmn_power_well_enable(): uninit the DDI PHY. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}
926 
/* The well's state is whatever the backing DDI PHY reports. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}
932 
/*
 * Verify the HW state of each DDI PHY whose DPIO common power well is
 * currently referenced. GLK additionally has a PHY C well to check.
 */
static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}
953 
954 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
955 					   struct i915_power_well *power_well)
956 {
957 	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
958 }
959 
960 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
961 {
962 	u32 tmp = I915_READ(DBUF_CTL);
963 
964 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
965 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
966 	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
967 }
968 
/*
 * "Enable" the DC_off power well, i.e. disallow DC5/DC6 entry, and
 * sanity-check the state (cdclk, DBuf, PHYs) that DMC/DC transitions
 * could have disturbed.
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Read back cdclk and verify it still matches our cached HW state. */
	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}
993 
994 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
995 					   struct i915_power_well *power_well)
996 {
997 	if (!dev_priv->csr.dmc_payload)
998 		return;
999 
1000 	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
1001 		skl_enable_dc6(dev_priv);
1002 	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
1003 		gen9_enable_dc5(dev_priv);
1004 }
1005 
/* No HW state to sync for always-on wells on pre-HSW platforms. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
1010 
/* Always-on wells can't be toggled; enable/disable are no-ops. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
1015 
/* Always-on wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
1021 
1022 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1023 					 struct i915_power_well *power_well)
1024 {
1025 	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1026 		i830_enable_pipe(dev_priv, PIPE_A);
1027 	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1028 		i830_enable_pipe(dev_priv, PIPE_B);
1029 }
1030 
/* Turn off both i830 pipes; B first, then A, mirroring the enable order. */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
1037 
1038 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1039 					  struct i915_power_well *power_well)
1040 {
1041 	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1042 		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1043 }
1044 
1045 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1046 					  struct i915_power_well *power_well)
1047 {
1048 	if (power_well->count > 0)
1049 		i830_pipes_power_well_enable(dev_priv, power_well);
1050 	else
1051 		i830_pipes_power_well_disable(dev_priv, power_well);
1052 }
1053 
/*
 * Program a VLV/CHV power well to the requested on/off state via the
 * Punit and wait (up to 100ms) for the status to reflect it.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	/* Skip the Punit write if the well is already in the wanted state. */
	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
1089 
/* ->enable() hook: power the well on via the Punit. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
1095 
/* ->disable() hook: gate the well off via the Punit. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
1101 
/*
 * Read a well's state back from the Punit. WARNs if the state is
 * neither fully on nor fully gated, or if control and status disagree
 * (which would mean someone else is poking the power controls).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
1137 
/*
 * One-time display clock gating, arbiter and rawclk setup performed
 * whenever the VLV/CHV display power well comes up.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;	/* keep only the preserved bit */
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	/* rawclk must be known by now; it feeds the RAWCLK_FREQ divider. */
	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
1164 
/*
 * Bring up display-side state that depends on the VLV/CHV display power
 * well: CRI/ref clocks, clock gating and display IRQs, plus (outside of
 * driver init/resume) hotplug, CRT ADPA, VGA and PPS state.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
1213 
/*
 * Tear down display-side state before the VLV/CHV display power well is
 * turned off: disable and flush display IRQs, reset PPS bookkeeping,
 * and re-arm connector polling unless we're in late suspend.
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
1229 
/* Power the display well on, then restore the display-side state. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1237 
/* Quiesce the display-side state, then gate the well off. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1245 
/*
 * Power up the VLV DPIO common-lane well and de-assert the common-lane
 * reset afterwards.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1267 
/*
 * Assert the common-lane reset and gate the DPIO common well off; all
 * PLLs must already be disabled.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1281 
/* Mask covering every defined display power domain bit. */
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

/* True iff all bits in @bits are set in @val. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1285 
/*
 * Derive the PHY status we expect from the cached chv_phy_control value
 * and the common-lane wells' states, then complain if the hardware
 * disagrees (allowing a grace period for PHY calibration to settle).
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* Lane pairs 0/1 (0x3) and 2/3 (0xc) have separate spline LDOs. */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(&dev_priv->uncore,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			   phy_status, dev_priv->chv_phy_control);
}
1391 
1392 #undef BITS_SET
1393 
/*
 * Power up a CHV DPIO common-lane well: gate the well on, wait for
 * phypwrgood, enable dynamic power-down in the PHY, and finally
 * de-assert the common-lane reset.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	/* Pipe here selects the DPIO sideband block, not a display pipe. */
	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(&dev_priv->uncore,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1457 
/*
 * Assert the common-lane reset and power a CHV DPIO well down; the
 * PLLs driven by this PHY must already be disabled.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1488 
/*
 * Cross-check the DPIO lane power-down status bits against the
 * override/mask a caller just programmed through DISPLAY_PHY_CONTROL.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* Extract the two power-down status bits for this channel. */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1550 
/*
 * Enable/disable the power-down override for a whole PHY channel.
 * Returns the previous override state so the caller can restore it.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* Nothing to do if the override is already in the requested state. */
	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1581 
/*
 * Program the per-lane power-down override mask for the encoder's PHY
 * channel, then verify the resulting PHY status.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* Replace the old lane mask with the new one. */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1611 
/*
 * Read back the CHV pipe-A power well state from the Punit. Only
 * PWR_ON/PWR_GATE are ever programmed, so anything else WARNs, as does
 * a disagreement between the control and status fields.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
1640 
/*
 * Program the CHV pipe-A power well to the requested state via the
 * Punit and wait (up to 100ms) for the status to follow.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	/* Skip the Punit write if the well is already in the wanted state. */
	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
1674 
/* Power the pipe well on, then restore the display-side state. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1682 
/* Quiesce the display-side state, then gate the pipe well off. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1690 
1691 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1692 {
1693 	return power_domains->async_put_domains[0] |
1694 	       power_domains->async_put_domains[1];
1695 }
1696 
1697 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1698 
1699 static bool
1700 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1701 {
1702 	return !WARN_ON(power_domains->async_put_domains[0] &
1703 			power_domains->async_put_domains[1]);
1704 }
1705 
/*
 * Debug-only consistency check of the async-put bookkeeping: queues
 * disjoint, the raw wakeref held iff any domain is queued, and every
 * queued domain holding exactly one reference.
 */
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	err |= WARN_ON(!!power_domains->async_put_wakeref !=
		       !!__async_put_domains_mask(power_domains));

	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);

	return !err;
}
1721 
/* Dump name and use count of every domain in @mask, for debugging. */
static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, u64 mask)
{
	struct drm_i915_private *i915 =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	enum intel_display_power_domain domain;

	DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
	for_each_power_domain(domain, mask)
		DRM_DEBUG_DRIVER("%s use_count %d\n",
				 intel_display_power_domain_str(i915, domain),
				 power_domains->domain_use_count[domain]);
}
1736 
/* Dump the complete async-put state: wakeref plus both queues. */
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
			 power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    power_domains->async_put_domains[1]);
}
1748 
/* Check the async-put bookkeeping and dump it when inconsistent. */
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}
1755 
1756 #else
1757 
/* No-op stub: checking only exists with CONFIG_DRM_I915_DEBUG_RUNTIME_PM. */
static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}
1762 
/* No-op stub: checking only exists with CONFIG_DRM_I915_DEBUG_RUNTIME_PM. */
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}
1767 
1768 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
1769 
/* Checked accessor: asserts the queues are disjoint, then returns their union. */
static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	return __async_put_domains_mask(power_domains);
}
1776 
1777 static void
1778 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
1779 			       enum intel_display_power_domain domain)
1780 {
1781 	assert_async_put_domain_masks_disjoint(power_domains);
1782 
1783 	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
1784 	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
1785 }
1786 
/*
 * Steal the reference @domain holds in a pending async put, if any.
 * Returns true when the pending put was cancelled and its reference
 * reused, so the caller need not grab the power well refs again. Also
 * drops the raw runtime-pm wakeref once no queued domains remain.
 */
static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool ret = false;

	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	/* Keep the work and wakeref while other domains are still queued. */
	if (async_put_domains_mask(power_domains))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}
1812 
/*
 * Grab a reference on every power well backing @domain and bump the
 * domain's use count. Caller must hold power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	/* Reusing a pending async-put ref covers the wells already. */
	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
1828 
1829 /**
1830  * intel_display_power_get - grab a power domain reference
1831  * @dev_priv: i915 device instance
1832  * @domain: power domain to reference
1833  *
1834  * This function grabs a power domain reference for @domain and ensures that the
1835  * power domain and all its parents are powered up. Therefore users should only
1836  * grab a reference to the innermost power domain they need.
1837  *
1838  * Any power domain reference obtained by this function must have a symmetric
1839  * call to intel_display_power_put() to release the reference again.
1840  */
1841 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1842 					enum intel_display_power_domain domain)
1843 {
1844 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1845 	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1846 
1847 	mutex_lock(&power_domains->lock);
1848 	__intel_display_power_get_domain(dev_priv, domain);
1849 	mutex_unlock(&power_domains->lock);
1850 
1851 	return wakeref;
1852 }
1853 
1854 /**
1855  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1856  * @dev_priv: i915 device instance
1857  * @domain: power domain to reference
1858  *
1859  * This function grabs a power domain reference for @domain and ensures that the
1860  * power domain and all its parents are powered up. Therefore users should only
1861  * grab a reference to the innermost power domain they need.
1862  *
1863  * Any power domain reference obtained by this function must have a symmetric
1864  * call to intel_display_power_put() to release the reference again.
1865  */
1866 intel_wakeref_t
1867 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1868 				   enum intel_display_power_domain domain)
1869 {
1870 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1871 	intel_wakeref_t wakeref;
1872 	bool is_enabled;
1873 
1874 	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
1875 	if (!wakeref)
1876 		return false;
1877 
1878 	mutex_lock(&power_domains->lock);
1879 
1880 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1881 		__intel_display_power_get_domain(dev_priv, domain);
1882 		is_enabled = true;
1883 	} else {
1884 		is_enabled = false;
1885 	}
1886 
1887 	mutex_unlock(&power_domains->lock);
1888 
1889 	if (!is_enabled) {
1890 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1891 		wakeref = 0;
1892 	}
1893 
1894 	return wakeref;
1895 }
1896 
1897 static void
1898 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
1899 				 enum intel_display_power_domain domain)
1900 {
1901 	struct i915_power_domains *power_domains;
1902 	struct i915_power_well *power_well;
1903 	const char *name = intel_display_power_domain_str(dev_priv, domain);
1904 
1905 	power_domains = &dev_priv->power_domains;
1906 
1907 	WARN(!power_domains->domain_use_count[domain],
1908 	     "Use count on domain %s is already zero\n",
1909 	     name);
1910 	WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
1911 	     "Async disabling of domain %s is pending\n",
1912 	     name);
1913 
1914 	power_domains->domain_use_count[domain]--;
1915 
1916 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
1917 		intel_power_well_put(dev_priv, power_well);
1918 }
1919 
1920 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
1921 				      enum intel_display_power_domain domain)
1922 {
1923 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1924 
1925 	mutex_lock(&power_domains->lock);
1926 	__intel_display_power_put_domain(dev_priv, domain);
1927 	mutex_unlock(&power_domains->lock);
1928 }
1929 
1930 /**
1931  * intel_display_power_put_unchecked - release an unchecked power domain reference
1932  * @dev_priv: i915 device instance
1933  * @domain: power domain to reference
1934  *
1935  * This function drops the power domain reference obtained by
1936  * intel_display_power_get() and might power down the corresponding hardware
1937  * block right away if this is the last reference.
1938  *
1939  * This function exists only for historical reasons and should be avoided in
1940  * new code, as the correctness of its use cannot be checked. Always use
1941  * intel_display_power_put() instead.
1942  */
1943 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
1944 				       enum intel_display_power_domain domain)
1945 {
1946 	__intel_display_power_put(dev_priv, domain);
1947 	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
1948 }
1949 
1950 static void
1951 queue_async_put_domains_work(struct i915_power_domains *power_domains,
1952 			     intel_wakeref_t wakeref)
1953 {
1954 	WARN_ON(power_domains->async_put_wakeref);
1955 	power_domains->async_put_wakeref = wakeref;
1956 	WARN_ON(!queue_delayed_work(system_unbound_wq,
1957 				    &power_domains->async_put_work,
1958 				    msecs_to_jiffies(100)));
1959 }
1960 
/*
 * Release the references on all domains in @mask that were deferred by an
 * async put. Caller holds power_domains->lock and a raw runtime PM wakeref.
 */
static void
release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must hold already raw wakeref, upgrade that to a proper
	 * wakeref to make the state checker happy about the HW access during
	 * power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}
1987 
/*
 * Delayed work handler releasing the domain references deferred via
 * intel_display_power_put_async(). Runs from system_unbound_wq (see
 * queue_async_put_domains_work()).
 */
static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	/* Raw wakeref to hand over to a requeued work instance, if needed. */
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (power_domains->async_put_domains[1]) {
		power_domains->async_put_domains[0] =
			fetch_and_zero(&power_domains->async_put_domains[1]);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	/* Drop whichever wakerefs were not handed over above. */
	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}
2030 
2031 /**
2032  * intel_display_power_put_async - release a power domain reference asynchronously
2033  * @i915: i915 device instance
2034  * @domain: power domain to reference
2035  * @wakeref: wakeref acquired for the reference that is being released
2036  *
2037  * This function drops the power domain reference obtained by
2038  * intel_display_power_get*() and schedules a work to power down the
2039  * corresponding hardware block if this is the last reference.
2040  */
2041 void __intel_display_power_put_async(struct drm_i915_private *i915,
2042 				     enum intel_display_power_domain domain,
2043 				     intel_wakeref_t wakeref)
2044 {
2045 	struct i915_power_domains *power_domains = &i915->power_domains;
2046 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
2047 	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2048 
2049 	mutex_lock(&power_domains->lock);
2050 
2051 	if (power_domains->domain_use_count[domain] > 1) {
2052 		__intel_display_power_put_domain(i915, domain);
2053 
2054 		goto out_verify;
2055 	}
2056 
2057 	WARN_ON(power_domains->domain_use_count[domain] != 1);
2058 
2059 	/* Let a pending work requeue itself or queue a new one. */
2060 	if (power_domains->async_put_wakeref) {
2061 		power_domains->async_put_domains[1] |= BIT_ULL(domain);
2062 	} else {
2063 		power_domains->async_put_domains[0] |= BIT_ULL(domain);
2064 		queue_async_put_domains_work(power_domains,
2065 					     fetch_and_zero(&work_wakeref));
2066 	}
2067 
2068 out_verify:
2069 	verify_async_put_domains_state(power_domains);
2070 
2071 	mutex_unlock(&power_domains->lock);
2072 
2073 	if (work_wakeref)
2074 		intel_runtime_pm_put_raw(rpm, work_wakeref);
2075 
2076 	intel_runtime_pm_put(rpm, wakeref);
2077 }
2078 
2079 /**
2080  * intel_display_power_flush_work - flushes the async display power disabling work
2081  * @i915: i915 device instance
2082  *
2083  * Flushes any pending work that was scheduled by a preceding
2084  * intel_display_power_put_async() call, completing the disabling of the
2085  * corresponding power domains.
2086  *
2087  * Note that the work handler function may still be running after this
2088  * function returns; to ensure that the work handler isn't running use
2089  * intel_display_power_flush_work_sync() instead.
2090  */
2091 void intel_display_power_flush_work(struct drm_i915_private *i915)
2092 {
2093 	struct i915_power_domains *power_domains = &i915->power_domains;
2094 	intel_wakeref_t work_wakeref;
2095 
2096 	mutex_lock(&power_domains->lock);
2097 
2098 	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2099 	if (!work_wakeref)
2100 		goto out_verify;
2101 
2102 	release_async_put_domains(power_domains,
2103 				  async_put_domains_mask(power_domains));
2104 	cancel_delayed_work(&power_domains->async_put_work);
2105 
2106 out_verify:
2107 	verify_async_put_domains_state(power_domains);
2108 
2109 	mutex_unlock(&power_domains->lock);
2110 
2111 	if (work_wakeref)
2112 		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2113 }
2114 
2115 /**
2116  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2117  * @i915: i915 device instance
2118  *
2119  * Like intel_display_power_flush_work(), but also ensure that the work
2120  * handler function is not running any more when this function returns.
2121  */
2122 static void
2123 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2124 {
2125 	struct i915_power_domains *power_domains = &i915->power_domains;
2126 
2127 	intel_display_power_flush_work(i915);
2128 	cancel_delayed_work_sync(&power_domains->async_put_work);
2129 
2130 	verify_async_put_domains_state(power_domains);
2131 
2132 	WARN_ON(power_domains->async_put_wakeref);
2133 }
2134 
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	/*
	 * NOTE(review): this wakeref-tracking variant is compiled only with
	 * runtime PM debugging enabled; presumably a non-tracking version is
	 * provided elsewhere otherwise -- confirm against the header.
	 */
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#endif
2154 
/* i830: domains backed by the single "pipes" power well. */
#define I830_PIPES_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
2163 
/* VLV: domains backed by the display power well. */
#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* VLV: domains backed by the DPIO common lane well (ports B/C + CRT). */
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* VLV: per-port DPIO TX lane-pair wells. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* CHV: domains backed by the display power well. */
#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* CHV: DPIO common lane wells for ports B/C and port D. */
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
2245 
/* HSW: domains backed by the global display power well. */
#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* BDW: like HSW, minus pipe A / its panel fitter. */
#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* SKL: power well 2 and the per-DDI IO power wells. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* SKL: domains that keep the DC-off (DC states disabled) well enabled. */
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
2316 
/* BXT: power well 2, DC-off and DPIO common lane well domains. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* GLK: power well 2, per-DDI IO, DPIO common, AUX and DC-off domains. */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
2400 
/* CNL: power well 2, per-DDI IO, per-port AUX and DC-off domains. */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
2457 
2458 /*
2459  * ICL PW_0/PG_0 domains (HW/DMC control):
2460  * - PCI
2461  * - clocks except port PLL
2462  * - central power except FBC
2463  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2464  * ICL PW_1/PG_1 domains (HW/DMC control):
2465  * - DBUF function
2466  * - PIPE_A and its planes, except VGA
2467  * - transcoder EDP + PSR
2468  * - transcoder DSI
2469  * - DDI_A
2470  * - FBC
2471  */
2472 #define ICL_PW_4_POWER_DOMAINS (			\
2473 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2474 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2475 	BIT_ULL(POWER_DOMAIN_INIT))
2476 	/* VDSC/joining */
2477 #define ICL_PW_3_POWER_DOMAINS (			\
2478 	ICL_PW_4_POWER_DOMAINS |			\
2479 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2480 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2481 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2482 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2483 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2484 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2485 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2486 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2487 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2488 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2489 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2490 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2491 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2492 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2493 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2494 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2495 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2496 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2497 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2498 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2499 	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
2500 	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
2501 	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
2502 	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
2503 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2504 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2505 	BIT_ULL(POWER_DOMAIN_INIT))
2506 	/*
2507 	 * - transcoder WD
2508 	 * - KVMR (HW control)
2509 	 */
2510 #define ICL_PW_2_POWER_DOMAINS (			\
2511 	ICL_PW_3_POWER_DOMAINS |			\
2512 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |		\
2513 	BIT_ULL(POWER_DOMAIN_INIT))
2514 	/*
2515 	 * - KVMR (HW control)
2516 	 */
2517 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2518 	ICL_PW_2_POWER_DOMAINS |			\
2519 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2520 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2521 	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |			\
2522 	BIT_ULL(POWER_DOMAIN_INIT))
2523 
2524 #define ICL_DDI_IO_A_POWER_DOMAINS (			\
2525 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2526 #define ICL_DDI_IO_B_POWER_DOMAINS (			\
2527 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2528 #define ICL_DDI_IO_C_POWER_DOMAINS (			\
2529 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2530 #define ICL_DDI_IO_D_POWER_DOMAINS (			\
2531 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2532 #define ICL_DDI_IO_E_POWER_DOMAINS (			\
2533 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2534 #define ICL_DDI_IO_F_POWER_DOMAINS (			\
2535 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2536 
2537 #define ICL_AUX_A_IO_POWER_DOMAINS (			\
2538 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2539 	BIT_ULL(POWER_DOMAIN_AUX_A))
2540 #define ICL_AUX_B_IO_POWER_DOMAINS (			\
2541 	BIT_ULL(POWER_DOMAIN_AUX_B))
2542 #define ICL_AUX_C_IO_POWER_DOMAINS (			\
2543 	BIT_ULL(POWER_DOMAIN_AUX_C))
2544 #define ICL_AUX_D_IO_POWER_DOMAINS (			\
2545 	BIT_ULL(POWER_DOMAIN_AUX_D))
2546 #define ICL_AUX_E_IO_POWER_DOMAINS (			\
2547 	BIT_ULL(POWER_DOMAIN_AUX_E))
2548 #define ICL_AUX_F_IO_POWER_DOMAINS (			\
2549 	BIT_ULL(POWER_DOMAIN_AUX_F))
2550 #define ICL_AUX_TBT1_IO_POWER_DOMAINS (			\
2551 	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
2552 #define ICL_AUX_TBT2_IO_POWER_DOMAINS (			\
2553 	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
2554 #define ICL_AUX_TBT3_IO_POWER_DOMAINS (			\
2555 	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
2556 #define ICL_AUX_TBT4_IO_POWER_DOMAINS (			\
2557 	BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2558 
/* TGL: nested power wells PW_5..PW_2; each includes the deeper one. */
#define TGL_PW_5_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_4_POWER_DOMAINS (			\
	TGL_PW_5_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_3_POWER_DOMAINS (			\
	TGL_PW_4_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TC1) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TC2) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TC3) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TC4) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TC5) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TC6) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT5) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT6) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_2_POWER_DOMAINS (			\
	TGL_PW_3_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	TGL_PW_2_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* TGL: per Type-C port DDI IO power wells. */
#define TGL_DDI_IO_TC1_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO))
#define TGL_DDI_IO_TC2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO))
#define TGL_DDI_IO_TC3_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO))
#define TGL_DDI_IO_TC4_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO))
#define TGL_DDI_IO_TC5_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO))
#define TGL_DDI_IO_TC6_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO))

/* TGL: per Type-C port AUX power wells (TC and TBT modes). */
#define TGL_AUX_TC1_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_TC1))
#define TGL_AUX_TC2_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_TC2))
#define TGL_AUX_TC3_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_TC3))
#define TGL_AUX_TC4_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_TC4))
#define TGL_AUX_TC5_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_TC5))
#define TGL_AUX_TC6_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_TC6))
#define TGL_AUX_TBT5_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT5))
#define TGL_AUX_TBT6_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT6))
2645 
/* Callbacks for always-on wells: everything except is_enabled is a no-op. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* Callbacks for the CHV per-pipe power wells. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* Callbacks for the CHV DPIO common lane power wells. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2666 
2667 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2668 	{
2669 		.name = "always-on",
2670 		.always_on = true,
2671 		.domains = POWER_DOMAIN_MASK,
2672 		.ops = &i9xx_always_on_power_well_ops,
2673 		.id = DISP_PW_ID_NONE,
2674 	},
2675 };
2676 
/* Ops for the i830 "pipes" well; this one does sync HW state at init. */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};
2683 
/* i830: an always-on well plus one well gating the display pipes. */
static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
2699 
/*
 * Generic HSW+ power well ops, driven through the PWR_WELL_CTL style
 * registers described by each well's .hsw.regs/.hsw.idx fields. Reused
 * by SKL/BXT/GLK/CNL/ICL/TGL wells below.
 */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
2706 
/* Ops for the gen9+ "DC off" well that blocks DC states while enabled. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};
2713 
/* Ops for BXT/GLK DPIO common-lane (PHY) wells, keyed by .bxt.phy. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
2720 
/*
 * HSW-style power well control register set: separate request registers
 * for BIOS, driver, KVMr and debug agents.
 */
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios	= HSW_PWR_WELL_CTL1,
	.driver	= HSW_PWR_WELL_CTL2,
	.kvmr	= HSW_PWR_WELL_CTL3,
	.debug	= HSW_PWR_WELL_CTL4,
};
2727 
/* Haswell: one global display well on top of the always-on well. */
static const struct i915_power_well_desc hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};
2748 
/*
 * Broadwell: same layout as HSW, but the global well additionally owns
 * the pipe B/C interrupts (irq_pipe_mask).
 */
static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};
2770 
/* Ops for the VLV disp2d (display) well, controlled via the punit. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2777 
/* Ops for the VLV DPIO common-lane well. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2784 
/* Ops for the plain VLV punit-controlled wells (DPIO TX lane groups). */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2791 
/*
 * Valleyview power wells. Note every dpio-tx well claims all four
 * B/C lane domain masks — presumably any lane use keeps all TX wells
 * powered; TODO(review): confirm against the VLV PHY docs.
 */
static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};
2867 
/* Cherryview power wells. */
static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};
2906 
2907 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2908 					 enum i915_power_well_id power_well_id)
2909 {
2910 	struct i915_power_well *power_well;
2911 	bool ret;
2912 
2913 	power_well = lookup_power_well(dev_priv, power_well_id);
2914 	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
2915 
2916 	return ret;
2917 }
2918 
/*
 * Skylake power wells. PW1 and MISC IO are marked always_on with an
 * empty domain mask because the DMC firmware manages them (see the
 * inline comments); the driver only tracks their IDs.
 */
static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
};
3012 
/* Broxton power wells: PW1 (DMC-managed), DC off, PW2 and two PHY wells. */
static const struct i915_power_well_desc bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};
3072 
/*
 * Geminilake power wells: like BXT plus a third PHY well and per-port
 * AUX and DDI IO wells.
 */
static const struct i915_power_well_desc glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DISP_PW_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
};
3201 
/*
 * Cannonlake power wells. Note the array order: AUX A-D come before
 * "DC off"/PW2 — presumably an intentional enable ordering; the later
 * DDI F / AUX F entries are trimmed at runtime on SKUs without port F
 * (handled outside this table).
 */
static const struct i915_power_well_desc cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
		},
	},
};
3343 
/* Ops for ICL+ combo-PHY AUX wells; status check is the generic HSW one. */
static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_combo_phy_aux_power_well_enable,
	.disable = icl_combo_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
3350 
/* Ops for ICL+ Type-C PHY AUX wells (wells flagged with .hsw.is_tc_tbt). */
static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_tc_phy_aux_power_well_enable,
	.disable = icl_tc_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
3357 
/* ICL AUX power well control registers; no KVMr request register. */
static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_AUX1,
	.driver	= ICL_PWR_WELL_CTL_AUX2,
	.debug	= ICL_PWR_WELL_CTL_AUX4,
};
3363 
/* ICL DDI IO power well control registers; no KVMr request register. */
static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_DDI1,
	.driver	= ICL_PWR_WELL_CTL_DDI2,
	.debug	= ICL_PWR_WELL_CTL_DDI4,
};
3369 
/*
 * Icelake power wells. AUX A/B sit on combo PHYs, AUX C-F on Type-C
 * PHYs (TC ops, is_tc_tbt = false), and AUX TBT1-4 are the Thunderbolt
 * variants (is_tc_tbt = true).
 */
static const struct i915_power_well_desc icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
		},
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = ICL_AUX_C_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX D",
		.domains = ICL_AUX_D_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E",
		.domains = ICL_AUX_E_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F",
		.domains = ICL_AUX_F_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX TBT1",
		.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT2",
		.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT3",
		.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT4",
		.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};
3602 
3603 static const struct i915_power_well_desc tgl_power_wells[] = {
3604 	{
3605 		.name = "always-on",
3606 		.always_on = true,
3607 		.domains = POWER_DOMAIN_MASK,
3608 		.ops = &i9xx_always_on_power_well_ops,
3609 		.id = DISP_PW_ID_NONE,
3610 	},
3611 	{
3612 		.name = "power well 1",
3613 		/* Handled by the DMC firmware */
3614 		.always_on = true,
3615 		.domains = 0,
3616 		.ops = &hsw_power_well_ops,
3617 		.id = SKL_DISP_PW_1,
3618 		{
3619 			.hsw.regs = &hsw_power_well_regs,
3620 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3621 			.hsw.has_fuses = true,
3622 		},
3623 	},
3624 	{
3625 		.name = "DC off",
3626 		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
3627 		.ops = &gen9_dc_off_power_well_ops,
3628 		.id = DISP_PW_ID_NONE,
3629 	},
3630 	{
3631 		.name = "power well 2",
3632 		.domains = TGL_PW_2_POWER_DOMAINS,
3633 		.ops = &hsw_power_well_ops,
3634 		.id = SKL_DISP_PW_2,
3635 		{
3636 			.hsw.regs = &hsw_power_well_regs,
3637 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3638 			.hsw.has_fuses = true,
3639 		},
3640 	},
3641 	{
3642 		.name = "power well 3",
3643 		.domains = TGL_PW_3_POWER_DOMAINS,
3644 		.ops = &hsw_power_well_ops,
3645 		.id = DISP_PW_ID_NONE,
3646 		{
3647 			.hsw.regs = &hsw_power_well_regs,
3648 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3649 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3650 			.hsw.has_vga = true,
3651 			.hsw.has_fuses = true,
3652 		},
3653 	},
3654 	{
3655 		.name = "DDI A IO",
3656 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3657 		.ops = &hsw_power_well_ops,
3658 		.id = DISP_PW_ID_NONE,
3659 		{
3660 			.hsw.regs = &icl_ddi_power_well_regs,
3661 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3662 		}
3663 	},
3664 	{
3665 		.name = "DDI B IO",
3666 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3667 		.ops = &hsw_power_well_ops,
3668 		.id = DISP_PW_ID_NONE,
3669 		{
3670 			.hsw.regs = &icl_ddi_power_well_regs,
3671 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3672 		}
3673 	},
3674 	{
3675 		.name = "DDI C IO",
3676 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3677 		.ops = &hsw_power_well_ops,
3678 		.id = DISP_PW_ID_NONE,
3679 		{
3680 			.hsw.regs = &icl_ddi_power_well_regs,
3681 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3682 		}
3683 	},
3684 	{
3685 		.name = "DDI TC1 IO",
3686 		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
3687 		.ops = &hsw_power_well_ops,
3688 		.id = DISP_PW_ID_NONE,
3689 		{
3690 			.hsw.regs = &icl_ddi_power_well_regs,
3691 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
3692 		},
3693 	},
3694 	{
3695 		.name = "DDI TC2 IO",
3696 		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
3697 		.ops = &hsw_power_well_ops,
3698 		.id = DISP_PW_ID_NONE,
3699 		{
3700 			.hsw.regs = &icl_ddi_power_well_regs,
3701 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
3702 		},
3703 	},
3704 	{
3705 		.name = "DDI TC3 IO",
3706 		.domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
3707 		.ops = &hsw_power_well_ops,
3708 		.id = DISP_PW_ID_NONE,
3709 		{
3710 			.hsw.regs = &icl_ddi_power_well_regs,
3711 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
3712 		},
3713 	},
3714 	{
3715 		.name = "DDI TC4 IO",
3716 		.domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
3717 		.ops = &hsw_power_well_ops,
3718 		.id = DISP_PW_ID_NONE,
3719 		{
3720 			.hsw.regs = &icl_ddi_power_well_regs,
3721 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
3722 		},
3723 	},
3724 	{
3725 		.name = "DDI TC5 IO",
3726 		.domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
3727 		.ops = &hsw_power_well_ops,
3728 		.id = DISP_PW_ID_NONE,
3729 		{
3730 			.hsw.regs = &icl_ddi_power_well_regs,
3731 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
3732 		},
3733 	},
3734 	{
3735 		.name = "DDI TC6 IO",
3736 		.domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
3737 		.ops = &hsw_power_well_ops,
3738 		.id = DISP_PW_ID_NONE,
3739 		{
3740 			.hsw.regs = &icl_ddi_power_well_regs,
3741 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
3742 		},
3743 	},
3744 	{
3745 		.name = "AUX A",
3746 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3747 		.ops = &icl_combo_phy_aux_power_well_ops,
3748 		.id = DISP_PW_ID_NONE,
3749 		{
3750 			.hsw.regs = &icl_aux_power_well_regs,
3751 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3752 		},
3753 	},
3754 	{
3755 		.name = "AUX B",
3756 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3757 		.ops = &icl_combo_phy_aux_power_well_ops,
3758 		.id = DISP_PW_ID_NONE,
3759 		{
3760 			.hsw.regs = &icl_aux_power_well_regs,
3761 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3762 		},
3763 	},
3764 	{
3765 		.name = "AUX C",
3766 		.domains = ICL_AUX_C_IO_POWER_DOMAINS,
3767 		.ops = &icl_combo_phy_aux_power_well_ops,
3768 		.id = DISP_PW_ID_NONE,
3769 		{
3770 			.hsw.regs = &icl_aux_power_well_regs,
3771 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3772 		},
3773 	},
3774 	{
3775 		.name = "AUX TC1",
3776 		.domains = TGL_AUX_TC1_IO_POWER_DOMAINS,
3777 		.ops = &icl_tc_phy_aux_power_well_ops,
3778 		.id = DISP_PW_ID_NONE,
3779 		{
3780 			.hsw.regs = &icl_aux_power_well_regs,
3781 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
3782 			.hsw.is_tc_tbt = false,
3783 		},
3784 	},
3785 	{
3786 		.name = "AUX TC2",
3787 		.domains = TGL_AUX_TC2_IO_POWER_DOMAINS,
3788 		.ops = &icl_tc_phy_aux_power_well_ops,
3789 		.id = DISP_PW_ID_NONE,
3790 		{
3791 			.hsw.regs = &icl_aux_power_well_regs,
3792 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
3793 			.hsw.is_tc_tbt = false,
3794 		},
3795 	},
3796 	{
3797 		.name = "AUX TC3",
3798 		.domains = TGL_AUX_TC3_IO_POWER_DOMAINS,
3799 		.ops = &icl_tc_phy_aux_power_well_ops,
3800 		.id = DISP_PW_ID_NONE,
3801 		{
3802 			.hsw.regs = &icl_aux_power_well_regs,
3803 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
3804 			.hsw.is_tc_tbt = false,
3805 		},
3806 	},
3807 	{
3808 		.name = "AUX TC4",
3809 		.domains = TGL_AUX_TC4_IO_POWER_DOMAINS,
3810 		.ops = &icl_tc_phy_aux_power_well_ops,
3811 		.id = DISP_PW_ID_NONE,
3812 		{
3813 			.hsw.regs = &icl_aux_power_well_regs,
3814 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
3815 			.hsw.is_tc_tbt = false,
3816 		},
3817 	},
3818 	{
3819 		.name = "AUX TC5",
3820 		.domains = TGL_AUX_TC5_IO_POWER_DOMAINS,
3821 		.ops = &icl_tc_phy_aux_power_well_ops,
3822 		.id = DISP_PW_ID_NONE,
3823 		{
3824 			.hsw.regs = &icl_aux_power_well_regs,
3825 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
3826 			.hsw.is_tc_tbt = false,
3827 		},
3828 	},
3829 	{
3830 		.name = "AUX TC6",
3831 		.domains = TGL_AUX_TC6_IO_POWER_DOMAINS,
3832 		.ops = &icl_tc_phy_aux_power_well_ops,
3833 		.id = DISP_PW_ID_NONE,
3834 		{
3835 			.hsw.regs = &icl_aux_power_well_regs,
3836 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
3837 			.hsw.is_tc_tbt = false,
3838 		},
3839 	},
3840 	{
3841 		.name = "AUX TBT1",
3842 		.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
3843 		.ops = &hsw_power_well_ops,
3844 		.id = DISP_PW_ID_NONE,
3845 		{
3846 			.hsw.regs = &icl_aux_power_well_regs,
3847 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
3848 			.hsw.is_tc_tbt = true,
3849 		},
3850 	},
3851 	{
3852 		.name = "AUX TBT2",
3853 		.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
3854 		.ops = &hsw_power_well_ops,
3855 		.id = DISP_PW_ID_NONE,
3856 		{
3857 			.hsw.regs = &icl_aux_power_well_regs,
3858 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
3859 			.hsw.is_tc_tbt = true,
3860 		},
3861 	},
3862 	{
3863 		.name = "AUX TBT3",
3864 		.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
3865 		.ops = &hsw_power_well_ops,
3866 		.id = DISP_PW_ID_NONE,
3867 		{
3868 			.hsw.regs = &icl_aux_power_well_regs,
3869 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
3870 			.hsw.is_tc_tbt = true,
3871 		},
3872 	},
3873 	{
3874 		.name = "AUX TBT4",
3875 		.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
3876 		.ops = &hsw_power_well_ops,
3877 		.id = DISP_PW_ID_NONE,
3878 		{
3879 			.hsw.regs = &icl_aux_power_well_regs,
3880 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
3881 			.hsw.is_tc_tbt = true,
3882 		},
3883 	},
3884 	{
3885 		.name = "AUX TBT5",
3886 		.domains = TGL_AUX_TBT5_IO_POWER_DOMAINS,
3887 		.ops = &hsw_power_well_ops,
3888 		.id = DISP_PW_ID_NONE,
3889 		{
3890 			.hsw.regs = &icl_aux_power_well_regs,
3891 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
3892 			.hsw.is_tc_tbt = true,
3893 		},
3894 	},
3895 	{
3896 		.name = "AUX TBT6",
3897 		.domains = TGL_AUX_TBT6_IO_POWER_DOMAINS,
3898 		.ops = &hsw_power_well_ops,
3899 		.id = DISP_PW_ID_NONE,
3900 		{
3901 			.hsw.regs = &icl_aux_power_well_regs,
3902 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
3903 			.hsw.is_tc_tbt = true,
3904 		},
3905 	},
3906 	{
3907 		.name = "power well 4",
3908 		.domains = TGL_PW_4_POWER_DOMAINS,
3909 		.ops = &hsw_power_well_ops,
3910 		.id = DISP_PW_ID_NONE,
3911 		{
3912 			.hsw.regs = &hsw_power_well_regs,
3913 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3914 			.hsw.has_fuses = true,
3915 			.hsw.irq_pipe_mask = BIT(PIPE_C),
3916 		}
3917 	},
3918 	{
3919 		.name = "power well 5",
3920 		.domains = TGL_PW_5_POWER_DOMAINS,
3921 		.ops = &hsw_power_well_ops,
3922 		.id = DISP_PW_ID_NONE,
3923 		{
3924 			.hsw.regs = &hsw_power_well_regs,
3925 			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
3926 			.hsw.has_fuses = true,
3927 			.hsw.irq_pipe_mask = BIT(PIPE_D),
3928 		},
3929 	},
3930 };
3931 
/*
 * Clamp the disable_power_well modparam to a boolean: any negative
 * ("auto") value defaults to 1 (power wells may be disabled).
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	return disable_power_well < 0 ? 1 : !!disable_power_well;
}
3941 
3942 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3943 			       int enable_dc)
3944 {
3945 	u32 mask;
3946 	int requested_dc;
3947 	int max_dc;
3948 
3949 	if (INTEL_GEN(dev_priv) >= 11) {
3950 		max_dc = 2;
3951 		/*
3952 		 * DC9 has a separate HW flow from the rest of the DC states,
3953 		 * not depending on the DMC firmware. It's needed by system
3954 		 * suspend/resume, so allow it unconditionally.
3955 		 */
3956 		mask = DC_STATE_EN_DC9;
3957 	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
3958 		max_dc = 2;
3959 		mask = 0;
3960 	} else if (IS_GEN9_LP(dev_priv)) {
3961 		max_dc = 1;
3962 		mask = DC_STATE_EN_DC9;
3963 	} else {
3964 		max_dc = 0;
3965 		mask = 0;
3966 	}
3967 
3968 	if (!i915_modparams.disable_power_well)
3969 		max_dc = 0;
3970 
3971 	if (enable_dc >= 0 && enable_dc <= max_dc) {
3972 		requested_dc = enable_dc;
3973 	} else if (enable_dc == -1) {
3974 		requested_dc = max_dc;
3975 	} else if (enable_dc > max_dc && enable_dc <= 2) {
3976 		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3977 			      enable_dc, max_dc);
3978 		requested_dc = max_dc;
3979 	} else {
3980 		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3981 		requested_dc = max_dc;
3982 	}
3983 
3984 	if (requested_dc > 1)
3985 		mask |= DC_STATE_EN_UPTO_DC6;
3986 	if (requested_dc > 0)
3987 		mask |= DC_STATE_EN_UPTO_DC5;
3988 
3989 	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
3990 
3991 	return mask;
3992 }
3993 
3994 static int
3995 __set_power_wells(struct i915_power_domains *power_domains,
3996 		  const struct i915_power_well_desc *power_well_descs,
3997 		  int power_well_count)
3998 {
3999 	u64 power_well_ids = 0;
4000 	int i;
4001 
4002 	power_domains->power_well_count = power_well_count;
4003 	power_domains->power_wells =
4004 				kcalloc(power_well_count,
4005 					sizeof(*power_domains->power_wells),
4006 					GFP_KERNEL);
4007 	if (!power_domains->power_wells)
4008 		return -ENOMEM;
4009 
4010 	for (i = 0; i < power_well_count; i++) {
4011 		enum i915_power_well_id id = power_well_descs[i].id;
4012 
4013 		power_domains->power_wells[i].desc = &power_well_descs[i];
4014 
4015 		if (id == DISP_PW_ID_NONE)
4016 			continue;
4017 
4018 		WARN_ON(id >= sizeof(power_well_ids) * 8);
4019 		WARN_ON(power_well_ids & BIT_ULL(id));
4020 		power_well_ids |= BIT_ULL(id);
4021 	}
4022 
4023 	return 0;
4024 }
4025 
/* Convenience wrapper deriving the well count from the descriptor array. */
#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))
4029 
4030 /**
4031  * intel_power_domains_init - initializes the power domain structures
4032  * @dev_priv: i915 device instance
4033  *
4034  * Initializes the power domain structures for @dev_priv depending upon the
4035  * supported platform.
4036  */
4037 int intel_power_domains_init(struct drm_i915_private *dev_priv)
4038 {
4039 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4040 	int err;
4041 
4042 	i915_modparams.disable_power_well =
4043 		sanitize_disable_power_well_option(dev_priv,
4044 						   i915_modparams.disable_power_well);
4045 	dev_priv->csr.allowed_dc_mask =
4046 		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
4047 
4048 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4049 
4050 	mutex_init(&power_domains->lock);
4051 
4052 	INIT_DELAYED_WORK(&power_domains->async_put_work,
4053 			  intel_display_power_put_async_work);
4054 
4055 	/*
4056 	 * The enabling order will be from lower to higher indexed wells,
4057 	 * the disabling order is reversed.
4058 	 */
4059 	if (IS_GEN(dev_priv, 12)) {
4060 		err = set_power_wells(power_domains, tgl_power_wells);
4061 	} else if (IS_GEN(dev_priv, 11)) {
4062 		err = set_power_wells(power_domains, icl_power_wells);
4063 	} else if (IS_CANNONLAKE(dev_priv)) {
4064 		err = set_power_wells(power_domains, cnl_power_wells);
4065 
4066 		/*
4067 		 * DDI and Aux IO are getting enabled for all ports
4068 		 * regardless the presence or use. So, in order to avoid
4069 		 * timeouts, lets remove them from the list
4070 		 * for the SKUs without port F.
4071 		 */
4072 		if (!IS_CNL_WITH_PORT_F(dev_priv))
4073 			power_domains->power_well_count -= 2;
4074 	} else if (IS_GEMINILAKE(dev_priv)) {
4075 		err = set_power_wells(power_domains, glk_power_wells);
4076 	} else if (IS_BROXTON(dev_priv)) {
4077 		err = set_power_wells(power_domains, bxt_power_wells);
4078 	} else if (IS_GEN9_BC(dev_priv)) {
4079 		err = set_power_wells(power_domains, skl_power_wells);
4080 	} else if (IS_CHERRYVIEW(dev_priv)) {
4081 		err = set_power_wells(power_domains, chv_power_wells);
4082 	} else if (IS_BROADWELL(dev_priv)) {
4083 		err = set_power_wells(power_domains, bdw_power_wells);
4084 	} else if (IS_HASWELL(dev_priv)) {
4085 		err = set_power_wells(power_domains, hsw_power_wells);
4086 	} else if (IS_VALLEYVIEW(dev_priv)) {
4087 		err = set_power_wells(power_domains, vlv_power_wells);
4088 	} else if (IS_I830(dev_priv)) {
4089 		err = set_power_wells(power_domains, i830_power_wells);
4090 	} else {
4091 		err = set_power_wells(power_domains, i9xx_always_on_power_well);
4092 	}
4093 
4094 	return err;
4095 }
4096 
4097 /**
4098  * intel_power_domains_cleanup - clean up power domains resources
4099  * @dev_priv: i915 device instance
4100  *
4101  * Release any resources acquired by intel_power_domains_init()
4102  */
4103 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4104 {
4105 	kfree(dev_priv->power_domains.power_wells);
4106 }
4107 
4108 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4109 {
4110 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4111 	struct i915_power_well *power_well;
4112 
4113 	mutex_lock(&power_domains->lock);
4114 	for_each_power_well(dev_priv, power_well) {
4115 		power_well->desc->ops->sync_hw(dev_priv, power_well);
4116 		power_well->hw_enabled =
4117 			power_well->desc->ops->is_enabled(dev_priv, power_well);
4118 	}
4119 	mutex_unlock(&power_domains->lock);
4120 }
4121 
4122 static inline
4123 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4124 			  i915_reg_t reg, bool enable)
4125 {
4126 	u32 val, status;
4127 
4128 	val = I915_READ(reg);
4129 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4130 	I915_WRITE(reg, val);
4131 	POSTING_READ(reg);
4132 	udelay(10);
4133 
4134 	status = I915_READ(reg) & DBUF_POWER_STATE;
4135 	if ((enable && !status) || (!enable && status)) {
4136 		DRM_ERROR("DBus power %s timeout!\n",
4137 			  enable ? "enable" : "disable");
4138 		return false;
4139 	}
4140 	return true;
4141 }
4142 
/* Power up the single gen9 DBuf slice. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
}
4147 
/* Power down the single gen9 DBuf slice. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
}
4152 
4153 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
4154 {
4155 	if (INTEL_GEN(dev_priv) < 11)
4156 		return 1;
4157 	return 2;
4158 }
4159 
4160 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
4161 			    u8 req_slices)
4162 {
4163 	const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
4164 	bool ret;
4165 
4166 	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
4167 		DRM_ERROR("Invalid number of dbuf slices requested\n");
4168 		return;
4169 	}
4170 
4171 	if (req_slices == hw_enabled_slices || req_slices == 0)
4172 		return;
4173 
4174 	if (req_slices > hw_enabled_slices)
4175 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
4176 	else
4177 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
4178 
4179 	if (ret)
4180 		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
4181 }
4182 
/* Power up both gen11+ DBuf slices and wait for the power state ack. */
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	/* Give the HW time to report the new power state. */
	udelay(10);

	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
	else
		/*
		 * FIXME: for now pretend that we only have 1 slice, see
		 * intel_enabled_dbuf_slices_num().
		 */
		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
}
4201 
/* Power down both gen11+ DBuf slices and wait for the power state ack. */
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	/* Give the HW time to report the new power state. */
	udelay(10);

	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power disable timeout!\n");
	else
		/*
		 * FIXME: for now pretend that the first slice is always
		 * enabled, see intel_enabled_dbuf_slices_num().
		 */
		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
}
4220 
4221 static void icl_mbus_init(struct drm_i915_private *dev_priv)
4222 {
4223 	u32 val;
4224 
4225 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4226 	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
4227 	      MBUS_ABOX_B_CREDIT(1) |
4228 	      MBUS_ABOX_BW_CREDIT(1);
4229 
4230 	I915_WRITE(MBUS_ABOX_CTL, val);
4231 }
4232 
/* Sanity check the LCPLL/CDCLK state programmed by the BIOS on HSW/BDW. */
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = I915_READ(LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong.  Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		DRM_ERROR("CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		DRM_ERROR("LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		DRM_ERROR("LCPLL not using non-SSC reference\n");
}
4252 
/*
 * Warn about anything still active that depends on LCPLL: active CRTCs,
 * power wells, PLLs, panel power, PWMs, the utility pin and GTC. All of
 * these must be off before LCPLL may be disabled.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	/* The second CPU backlight PWM only exists on HSW. */
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
4292 
4293 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4294 {
4295 	if (IS_HASWELL(dev_priv))
4296 		return I915_READ(D_COMP_HSW);
4297 	else
4298 		return I915_READ(D_COMP_BDW);
4299 }
4300 
4301 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4302 {
4303 	if (IS_HASWELL(dev_priv)) {
4304 		if (sandybridge_pcode_write(dev_priv,
4305 					    GEN6_PCODE_WRITE_D_COMP, val))
4306 			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
4307 	} else {
4308 		I915_WRITE(D_COMP_BDW, val);
4309 		POSTING_READ(D_COMP_BDW);
4310 	}
4311 }
4312 
4313 /*
4314  * This function implements pieces of two sequences from BSpec:
4315  * - Sequence for display software to disable LCPLL
4316  * - Sequence for display software to allow package C8+
4317  * The steps implemented here are just the steps that actually touch the LCPLL
4318  * register. Callers should take care of disabling all the display engine
4319  * functions, doing the mode unset, fixing interrupts, etc.
4320  */
4321 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4322 			      bool switch_to_fclk, bool allow_power_down)
4323 {
4324 	u32 val;
4325 
4326 	assert_can_disable_lcpll(dev_priv);
4327 
4328 	val = I915_READ(LCPLL_CTL);
4329 
4330 	if (switch_to_fclk) {
4331 		val |= LCPLL_CD_SOURCE_FCLK;
4332 		I915_WRITE(LCPLL_CTL, val);
4333 
4334 		if (wait_for_us(I915_READ(LCPLL_CTL) &
4335 				LCPLL_CD_SOURCE_FCLK_DONE, 1))
4336 			DRM_ERROR("Switching to FCLK failed\n");
4337 
4338 		val = I915_READ(LCPLL_CTL);
4339 	}
4340 
4341 	val |= LCPLL_PLL_DISABLE;
4342 	I915_WRITE(LCPLL_CTL, val);
4343 	POSTING_READ(LCPLL_CTL);
4344 
4345 	if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
4346 				    LCPLL_PLL_LOCK, 0, 1))
4347 		DRM_ERROR("LCPLL still locked\n");
4348 
4349 	val = hsw_read_dcomp(dev_priv);
4350 	val |= D_COMP_COMP_DISABLE;
4351 	hsw_write_dcomp(dev_priv, val);
4352 	ndelay(100);
4353 
4354 	if (wait_for((hsw_read_dcomp(dev_priv) &
4355 		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4356 		DRM_ERROR("D_COMP RCOMP still in progress\n");
4357 
4358 	if (allow_power_down) {
4359 		val = I915_READ(LCPLL_CTL);
4360 		val |= LCPLL_POWER_DOWN_ALLOW;
4361 		I915_WRITE(LCPLL_CTL, val);
4362 		POSTING_READ(LCPLL_CTL);
4363 	}
4364 }
4365 
4366 /*
4367  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4368  * source.
4369  */
4370 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4371 {
4372 	u32 val;
4373 
4374 	val = I915_READ(LCPLL_CTL);
4375 
4376 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4377 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4378 		return;
4379 
4380 	/*
4381 	 * Make sure we're not on PC8 state before disabling PC8, otherwise
4382 	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4383 	 */
4384 	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4385 
4386 	if (val & LCPLL_POWER_DOWN_ALLOW) {
4387 		val &= ~LCPLL_POWER_DOWN_ALLOW;
4388 		I915_WRITE(LCPLL_CTL, val);
4389 		POSTING_READ(LCPLL_CTL);
4390 	}
4391 
4392 	val = hsw_read_dcomp(dev_priv);
4393 	val |= D_COMP_COMP_FORCE;
4394 	val &= ~D_COMP_COMP_DISABLE;
4395 	hsw_write_dcomp(dev_priv, val);
4396 
4397 	val = I915_READ(LCPLL_CTL);
4398 	val &= ~LCPLL_PLL_DISABLE;
4399 	I915_WRITE(LCPLL_CTL, val);
4400 
4401 	if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
4402 				    LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
4403 		DRM_ERROR("LCPLL not locked yet\n");
4404 
4405 	if (val & LCPLL_CD_SOURCE_FCLK) {
4406 		val = I915_READ(LCPLL_CTL);
4407 		val &= ~LCPLL_CD_SOURCE_FCLK;
4408 		I915_WRITE(LCPLL_CTL, val);
4409 
4410 		if (wait_for_us((I915_READ(LCPLL_CTL) &
4411 				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4412 			DRM_ERROR("Switching back to LCPLL failed\n");
4413 	}
4414 
4415 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4416 
4417 	intel_update_cdclk(dev_priv);
4418 	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
4419 }
4420 
4421 /*
4422  * Package states C8 and deeper are really deep PC states that can only be
4423  * reached when all the devices on the system allow it, so even if the graphics
4424  * device allows PC8+, it doesn't mean the system will actually get to these
4425  * states. Our driver only allows PC8+ when going into runtime PM.
4426  *
4427  * The requirements for PC8+ are that all the outputs are disabled, the power
4428  * well is disabled and most interrupts are disabled, and these are also
4429  * requirements for runtime PM. When these conditions are met, we manually do
4430  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4431  * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
4432  * hang the machine.
4433  *
4434  * When we really reach PC8 or deeper states (not just when we allow it) we lose
4435  * the state of some registers, so when we come back from PC8+ we need to
4436  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4437  * need to take care of the registers kept by RC6. Notice that this happens even
4438  * if we don't put the device in PCI D3 state (which is what currently happens
4439  * because of the runtime PM support).
4440  *
4441  * For more, read "Display Sequences for Package C8" on the hardware
4442  * documentation.
4443  */
4444 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4445 {
4446 	u32 val;
4447 
4448 	DRM_DEBUG_KMS("Enabling package C8+\n");
4449 
4450 	if (HAS_PCH_LPT_LP(dev_priv)) {
4451 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
4452 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4453 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4454 	}
4455 
4456 	lpt_disable_clkout_dp(dev_priv);
4457 	hsw_disable_lcpll(dev_priv, true, true);
4458 }
4459 
/* Undo hsw_enable_pc8(): restore LCPLL, refclks and PCH clock gating. */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	/* On LPT-LP, set the LP partition level disable bit back. */
	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
4475 
4476 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4477 				      bool enable)
4478 {
4479 	i915_reg_t reg;
4480 	u32 reset_bits, val;
4481 
4482 	if (IS_IVYBRIDGE(dev_priv)) {
4483 		reg = GEN7_MSG_CTL;
4484 		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4485 	} else {
4486 		reg = HSW_NDE_RSTWRN_OPT;
4487 		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4488 	}
4489 
4490 	val = I915_READ(reg);
4491 
4492 	if (enable)
4493 		val |= reset_bits;
4494 	else
4495 		val &= ~reset_bits;
4496 
4497 	I915_WRITE(reg, val);
4498 }
4499 
/* Bring up the SKL display core: handshake, PG1/Misc IO, CDCLK, DBUF, DMC. */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* Reload the DMC firmware if we are resuming and a payload exists. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
4529 
/* Tear down the SKL display core in reverse order of skl_display_core_init(). */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
4559 
/* Bring up the BXT display core: handshake off, PG1, CDCLK, DBUF, DMC. */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* Reload the DMC firmware if we are resuming and a payload exists. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
4591 
/* Tear down the BXT display core in reverse order of bxt_display_core_init(). */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
4619 
/* Bring up the CNL display core following the BSpec numbered sequence. */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2-3. */
	intel_combo_phy_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	intel_cdclk_init(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	/* Reload the DMC firmware if we are resuming and a payload exists. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
4651 
/* Tear down the CNL display core in reverse order of cnl_display_core_init(). */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */

	/* 5. Uninitialize the combo phys. */
	intel_combo_phy_uninit(dev_priv);
}
4682 
/* Bring up the ICL+ display core following the BSpec numbered sequence. */
void icl_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init(dev_priv);

	/* 5. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* Reload the DMC firmware if we are resuming and a payload exists. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
4718 
/* Tear down the ICL+ display core in reverse order of icl_display_core_init(). */
void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Uninitialize the combo phys. */
	intel_combo_phy_uninit(dev_priv);
}
4747 
/*
 * Reconstruct the shadow copy of DISPLAY_PHY_CONTROL from the current
 * power well and lane status, then program it to the HW.
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		/* Port C ready bits sit 4 bits above the port B ones. */
		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
4834 
/*
 * VLV common lane workaround: toggle the display PHY sideband reset by
 * power gating and then un-gating the common lane power well. Skipped
 * when the display already looks active (both wells enabled and the
 * common lane reset already deasserted), to avoid disturbing a live
 * BIOS-enabled display. Leaves DISP2D enabled and the common lane well
 * disabled; the normal power-on path re-enables it later.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}
4862 
4863 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
4864 {
4865 	bool ret;
4866 
4867 	vlv_punit_get(dev_priv);
4868 	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
4869 	vlv_punit_put(dev_priv);
4870 
4871 	return ret;
4872 }
4873 
4874 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
4875 {
4876 	WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
4877 	     "VED not power gated\n");
4878 }
4879 
4880 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
4881 {
4882 	static const struct pci_device_id isp_ids[] = {
4883 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
4884 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
4885 		{}
4886 	};
4887 
4888 	WARN(!pci_dev_present(isp_ids) &&
4889 	     !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
4890 	     "ISP not power gated\n");
4891 }
4892 
4893 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
4894 
4895 /**
4896  * intel_power_domains_init_hw - initialize hardware power domain state
4897  * @i915: i915 device instance
4898  * @resume: Called from resume code paths or not
4899  *
4900  * This function initializes the hardware power domain state and enables all
4901  * power wells belonging to the INIT power domain. Power wells in other
4902  * domains (and not in the INIT domain) are referenced or disabled by
4903  * intel_modeset_readout_hw_state(). After that the reference count of each
4904  * power well must match its HW enabled state, see
4905  * intel_power_domains_verify_state().
4906  *
4907  * It will return with power domains disabled (to be enabled later by
4908  * intel_power_domains_enable()) and must be paired with
4909  * intel_power_domains_driver_remove().
4910  */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	/*
	 * Platform specific display core init / workarounds. Newest
	 * platforms first; the CHV/VLV PHY workarounds must run under the
	 * power domains lock since they poke power well state directly.
	 */
	if (INTEL_GEN(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_CANNONLAKE(i915)) {
		cnl_display_core_init(i915, resume);
	} else if (IS_GEN9_BC(i915)) {
		skl_display_core_init(i915, resume);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/* Disable power support if the user asked so. */
	/*
	 * NOTE(review): with disable_power_well set, no extra reference is
	 * taken here and none is dropped in driver_remove()/suspend(), so
	 * the wells are left toggling-disabled as requested.
	 */
	if (!i915_modparams.disable_power_well)
		intel_display_power_get(i915, POWER_DOMAIN_INIT);
	/* Sync the SW state of all power wells with their HW state. */
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}
4959 
4960 /**
4961  * intel_power_domains_driver_remove - deinitialize hw power domain state
4962  * @i915: i915 device instance
4963  *
4964  * De-initializes the display power domain HW state. It also ensures that the
4965  * device stays powered up so that the driver can be reloaded.
4966  *
4967  * It must be called with power domains already disabled (after a call to
4968  * intel_power_domains_disable()) and must be paired with
4969  * intel_power_domains_init_hw().
4970  */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	/* Claim the INIT reference taken in intel_power_domains_disable(). */
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915_modparams.disable_power_well)
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

	/* Flush any pending async power-well disabling before verifying. */
	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
4987 
4988 /**
4989  * intel_power_domains_enable - enable toggling of display power wells
4990  * @i915: i915 device instance
4991  *
4992  * Enable the ondemand enabling/disabling of the display power wells. Note that
4993  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
4994  * only at specific points of the display modeset sequence, thus they are not
4995  * affected by the intel_power_domains_enable()/disable() calls. The purpose
4996  * of these function is to keep the rest of power wells enabled until the end
4997  * of display HW readout (which will acquire the power references reflecting
4998  * the current HW state).
4999  */
5000 void intel_power_domains_enable(struct drm_i915_private *i915)
5001 {
5002 	intel_wakeref_t wakeref __maybe_unused =
5003 		fetch_and_zero(&i915->power_domains.wakeref);
5004 
5005 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5006 	intel_power_domains_verify_state(i915);
5007 }
5008 
5009 /**
5010  * intel_power_domains_disable - disable toggling of display power wells
5011  * @i915: i915 device instance
5012  *
5013  * Disable the ondemand enabling/disabling of the display power wells. See
5014  * intel_power_domains_enable() for which power wells this call controls.
5015  */
5016 void intel_power_domains_disable(struct drm_i915_private *i915)
5017 {
5018 	struct i915_power_domains *power_domains = &i915->power_domains;
5019 
5020 	WARN_ON(power_domains->wakeref);
5021 	power_domains->wakeref =
5022 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5023 
5024 	intel_power_domains_verify_state(i915);
5025 }
5026 
5027 /**
5028  * intel_power_domains_suspend - suspend power domain state
5029  * @i915: i915 device instance
5030  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5031  *
5032  * This function prepares the hardware power domain state before entering
5033  * system suspend.
5034  *
5035  * It must be called with power domains already disabled (after a call to
5036  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
5037  */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	/* Claim the INIT reference taken in intel_power_domains_disable(). */
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * CSR/DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    i915->csr.dmc_payload) {
		/* display_core_suspended stays false: resume skips re-init. */
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915_modparams.disable_power_well)
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

	/* Flush async puts before verifying and tearing down the core. */
	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	/* Platform specific display core uninit, mirroring init_hw(). */
	if (INTEL_GEN(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_CANNONLAKE(i915))
		cnl_display_core_uninit(i915);
	else if (IS_GEN9_BC(i915))
		skl_display_core_uninit(i915);
	else if (IS_GEN9_LP(i915))
		bxt_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}
5083 
5084 /**
5085  * intel_power_domains_resume - resume power domain state
5086  * @i915: i915 device instance
5087  *
5088  * This function resume the hardware power domain state during system resume.
5089  *
5090  * It will return with power domain support disabled (to be enabled later by
5091  * intel_power_domains_enable()) and must be paired with
5092  * intel_power_domains_suspend().
5093  */
5094 void intel_power_domains_resume(struct drm_i915_private *i915)
5095 {
5096 	struct i915_power_domains *power_domains = &i915->power_domains;
5097 
5098 	if (power_domains->display_core_suspended) {
5099 		intel_power_domains_init_hw(i915, true);
5100 		power_domains->display_core_suspended = false;
5101 	} else {
5102 		WARN_ON(power_domains->wakeref);
5103 		power_domains->wakeref =
5104 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
5105 	}
5106 
5107 	intel_power_domains_verify_state(i915);
5108 }
5109 
5110 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5111 
5112 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5113 {
5114 	struct i915_power_domains *power_domains = &i915->power_domains;
5115 	struct i915_power_well *power_well;
5116 
5117 	for_each_power_well(i915, power_well) {
5118 		enum intel_display_power_domain domain;
5119 
5120 		DRM_DEBUG_DRIVER("%-25s %d\n",
5121 				 power_well->desc->name, power_well->count);
5122 
5123 		for_each_power_domain(domain, power_well->desc->domains)
5124 			DRM_DEBUG_DRIVER("  %-23s %d\n",
5125 					 intel_display_power_domain_str(i915,
5126 									domain),
5127 					 power_domains->domain_use_count[domain]);
5128 	}
5129 }
5130 
5131 /**
5132  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5133  * @i915: i915 device instance
5134  *
5135  * Verify if the reference count of each power well matches its HW enabled
5136  * state and the total refcount of the domains it belongs to. This must be
5137  * called after modeset HW state sanitization, which is responsible for
5138  * acquiring reference counts for any power wells in use and disabling the
5139  * ones left on by BIOS but not required by any active output.
5140  */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		/*
		 * A well must be enabled in HW iff it is referenced or
		 * marked always-on.
		 */
		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
				  power_well->desc->name,
				  power_well->count, enabled);

		/*
		 * The well's refcount must equal the sum of the use counts
		 * of all domains it powers.
		 */
		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			DRM_ERROR("power well %s refcount/domain refcount mismatch "
				  "(refcount %d/domains refcount %d)\n",
				  power_well->desc->name, power_well->count,
				  domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		/* Dump the full state only once per boot to limit log spam. */
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}
5188 
5189 #else
5190 
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}
5194 
5195 #endif
5196