1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <linux/vgaarb.h>
7 
8 #include "display/intel_crt.h"
9 #include "display/intel_dp.h"
10 
11 #include "i915_drv.h"
12 #include "i915_irq.h"
13 #include "intel_cdclk.h"
14 #include "intel_combo_phy.h"
15 #include "intel_csr.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_drv.h"
18 #include "intel_hotplug.h"
19 #include "intel_sideband.h"
20 
21 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
22 					 enum i915_power_well_id power_well_id);
23 
24 const char *
25 intel_display_power_domain_str(enum intel_display_power_domain domain)
26 {
27 	switch (domain) {
28 	case POWER_DOMAIN_DISPLAY_CORE:
29 		return "DISPLAY_CORE";
30 	case POWER_DOMAIN_PIPE_A:
31 		return "PIPE_A";
32 	case POWER_DOMAIN_PIPE_B:
33 		return "PIPE_B";
34 	case POWER_DOMAIN_PIPE_C:
35 		return "PIPE_C";
36 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
37 		return "PIPE_A_PANEL_FITTER";
38 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
39 		return "PIPE_B_PANEL_FITTER";
40 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
41 		return "PIPE_C_PANEL_FITTER";
42 	case POWER_DOMAIN_TRANSCODER_A:
43 		return "TRANSCODER_A";
44 	case POWER_DOMAIN_TRANSCODER_B:
45 		return "TRANSCODER_B";
46 	case POWER_DOMAIN_TRANSCODER_C:
47 		return "TRANSCODER_C";
48 	case POWER_DOMAIN_TRANSCODER_EDP:
49 		return "TRANSCODER_EDP";
50 	case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
51 		return "TRANSCODER_EDP_VDSC";
52 	case POWER_DOMAIN_TRANSCODER_DSI_A:
53 		return "TRANSCODER_DSI_A";
54 	case POWER_DOMAIN_TRANSCODER_DSI_C:
55 		return "TRANSCODER_DSI_C";
56 	case POWER_DOMAIN_PORT_DDI_A_LANES:
57 		return "PORT_DDI_A_LANES";
58 	case POWER_DOMAIN_PORT_DDI_B_LANES:
59 		return "PORT_DDI_B_LANES";
60 	case POWER_DOMAIN_PORT_DDI_C_LANES:
61 		return "PORT_DDI_C_LANES";
62 	case POWER_DOMAIN_PORT_DDI_D_LANES:
63 		return "PORT_DDI_D_LANES";
64 	case POWER_DOMAIN_PORT_DDI_E_LANES:
65 		return "PORT_DDI_E_LANES";
66 	case POWER_DOMAIN_PORT_DDI_F_LANES:
67 		return "PORT_DDI_F_LANES";
68 	case POWER_DOMAIN_PORT_DDI_A_IO:
69 		return "PORT_DDI_A_IO";
70 	case POWER_DOMAIN_PORT_DDI_B_IO:
71 		return "PORT_DDI_B_IO";
72 	case POWER_DOMAIN_PORT_DDI_C_IO:
73 		return "PORT_DDI_C_IO";
74 	case POWER_DOMAIN_PORT_DDI_D_IO:
75 		return "PORT_DDI_D_IO";
76 	case POWER_DOMAIN_PORT_DDI_E_IO:
77 		return "PORT_DDI_E_IO";
78 	case POWER_DOMAIN_PORT_DDI_F_IO:
79 		return "PORT_DDI_F_IO";
80 	case POWER_DOMAIN_PORT_DSI:
81 		return "PORT_DSI";
82 	case POWER_DOMAIN_PORT_CRT:
83 		return "PORT_CRT";
84 	case POWER_DOMAIN_PORT_OTHER:
85 		return "PORT_OTHER";
86 	case POWER_DOMAIN_VGA:
87 		return "VGA";
88 	case POWER_DOMAIN_AUDIO:
89 		return "AUDIO";
90 	case POWER_DOMAIN_AUX_A:
91 		return "AUX_A";
92 	case POWER_DOMAIN_AUX_B:
93 		return "AUX_B";
94 	case POWER_DOMAIN_AUX_C:
95 		return "AUX_C";
96 	case POWER_DOMAIN_AUX_D:
97 		return "AUX_D";
98 	case POWER_DOMAIN_AUX_E:
99 		return "AUX_E";
100 	case POWER_DOMAIN_AUX_F:
101 		return "AUX_F";
102 	case POWER_DOMAIN_AUX_IO_A:
103 		return "AUX_IO_A";
104 	case POWER_DOMAIN_AUX_TBT1:
105 		return "AUX_TBT1";
106 	case POWER_DOMAIN_AUX_TBT2:
107 		return "AUX_TBT2";
108 	case POWER_DOMAIN_AUX_TBT3:
109 		return "AUX_TBT3";
110 	case POWER_DOMAIN_AUX_TBT4:
111 		return "AUX_TBT4";
112 	case POWER_DOMAIN_GMBUS:
113 		return "GMBUS";
114 	case POWER_DOMAIN_INIT:
115 		return "INIT";
116 	case POWER_DOMAIN_MODESET:
117 		return "MODESET";
118 	case POWER_DOMAIN_GT_IRQ:
119 		return "GT_IRQ";
120 	default:
121 		MISSING_CASE(domain);
122 		return "?";
123 	}
124 }
125 
126 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
127 				    struct i915_power_well *power_well)
128 {
129 	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
130 	power_well->desc->ops->enable(dev_priv, power_well);
131 	power_well->hw_enabled = true;
132 }
133 
134 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
135 				     struct i915_power_well *power_well)
136 {
137 	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
138 	power_well->hw_enabled = false;
139 	power_well->desc->ops->disable(dev_priv, power_well);
140 }
141 
142 static void intel_power_well_get(struct drm_i915_private *dev_priv,
143 				 struct i915_power_well *power_well)
144 {
145 	if (!power_well->count++)
146 		intel_power_well_enable(dev_priv, power_well);
147 }
148 
149 static void intel_power_well_put(struct drm_i915_private *dev_priv,
150 				 struct i915_power_well *power_well)
151 {
152 	WARN(!power_well->count, "Use count on power well %s is already zero",
153 	     power_well->desc->name);
154 
155 	if (!--power_well->count)
156 		intel_power_well_disable(dev_priv, power_well);
157 }
158 
159 /**
160  * __intel_display_power_is_enabled - unlocked check for a power domain
161  * @dev_priv: i915 device instance
162  * @domain: power domain to check
163  *
164  * This is the unlocked version of intel_display_power_is_enabled() and should
165  * only be used from error capture and recovery code where deadlocks are
166  * possible.
167  *
168  * Returns:
169  * True when the power domain is enabled, false otherwise.
170  */
171 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
172 				      enum intel_display_power_domain domain)
173 {
174 	struct i915_power_well *power_well;
175 	bool is_enabled;
176 
177 	if (dev_priv->runtime_pm.suspended)
178 		return false;
179 
180 	is_enabled = true;
181 
182 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
183 		if (power_well->desc->always_on)
184 			continue;
185 
186 		if (!power_well->hw_enabled) {
187 			is_enabled = false;
188 			break;
189 		}
190 	}
191 
192 	return is_enabled;
193 }
194 
195 /**
196  * intel_display_power_is_enabled - check for a power domain
197  * @dev_priv: i915 device instance
198  * @domain: power domain to check
199  *
200  * This function can be used to check the hw power domain state. It is mostly
201  * used in hardware state readout functions. Everywhere else code should rely
202  * upon explicit power domain reference counting to ensure that the hardware
203  * block is powered up before accessing it.
204  *
205  * Callers must hold the relevant modesetting locks to ensure that concurrent
206  * threads can't disable the power well while the caller tries to read a few
207  * registers.
208  *
209  * Returns:
210  * True when the power domain is enabled, false otherwise.
211  */
212 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
213 				    enum intel_display_power_domain domain)
214 {
215 	struct i915_power_domains *power_domains;
216 	bool ret;
217 
218 	power_domains = &dev_priv->power_domains;
219 
220 	mutex_lock(&power_domains->lock);
221 	ret = __intel_display_power_is_enabled(dev_priv, domain);
222 	mutex_unlock(&power_domains->lock);
223 
224 	return ret;
225 }
226 
227 /*
228  * Starting with Haswell, we have a "Power Down Well" that can be turned off
229  * when not needed anymore. We have 4 registers that can request the power well
230  * to be enabled, and it will only be disabled if none of the registers is
231  * requesting it to be enabled.
232  */
233 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
234 				       u8 irq_pipe_mask, bool has_vga)
235 {
236 	struct pci_dev *pdev = dev_priv->drm.pdev;
237 
238 	/*
239 	 * After we re-enable the power well, if we touch VGA register 0x3d5
240 	 * we'll get unclaimed register interrupts. This stops after we write
241 	 * anything to the VGA MSR register. The vgacon module uses this
242 	 * register all the time, so if we unbind our driver and, as a
243 	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
244 	 * console_unlock(). So make here we touch the VGA MSR register, making
245 	 * sure vgacon can keep working normally without triggering interrupts
246 	 * and error messages.
247 	 */
248 	if (has_vga) {
249 		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
250 		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
251 		vga_put(pdev, VGA_RSRC_LEGACY_IO);
252 	}
253 
254 	if (irq_pipe_mask)
255 		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
256 }
257 
258 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
259 				       u8 irq_pipe_mask)
260 {
261 	if (irq_pipe_mask)
262 		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
263 }
264 
265 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
266 					   struct i915_power_well *power_well)
267 {
268 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
269 	int pw_idx = power_well->desc->hsw.idx;
270 
271 	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
272 	WARN_ON(intel_wait_for_register(&dev_priv->uncore,
273 					regs->driver,
274 					HSW_PWR_WELL_CTL_STATE(pw_idx),
275 					HSW_PWR_WELL_CTL_STATE(pw_idx),
276 					1));
277 }
278 
279 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
280 				     const struct i915_power_well_regs *regs,
281 				     int pw_idx)
282 {
283 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
284 	u32 ret;
285 
286 	ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
287 	ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
288 	if (regs->kvmr.reg)
289 		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
290 	ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
291 
292 	return ret;
293 }
294 
295 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
296 					    struct i915_power_well *power_well)
297 {
298 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
299 	int pw_idx = power_well->desc->hsw.idx;
300 	bool disabled;
301 	u32 reqs;
302 
303 	/*
304 	 * Bspec doesn't require waiting for PWs to get disabled, but still do
305 	 * this for paranoia. The known cases where a PW will be forced on:
306 	 * - a KVMR request on any power well via the KVMR request register
307 	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
308 	 *   DEBUG request registers
309 	 * Skip the wait in case any of the request bits are set and print a
310 	 * diagnostic message.
311 	 */
312 	wait_for((disabled = !(I915_READ(regs->driver) &
313 			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
314 		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
315 	if (disabled)
316 		return;
317 
318 	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
319 		      power_well->desc->name,
320 		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
321 }
322 
323 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
324 					   enum skl_power_gate pg)
325 {
326 	/* Timeout 5us for PG#0, for other PGs 1us */
327 	WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
328 					SKL_FUSE_PG_DIST_STATUS(pg),
329 					SKL_FUSE_PG_DIST_STATUS(pg), 1));
330 }
331 
332 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
333 				  struct i915_power_well *power_well)
334 {
335 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
336 	int pw_idx = power_well->desc->hsw.idx;
337 	bool wait_fuses = power_well->desc->hsw.has_fuses;
338 	enum skl_power_gate uninitialized_var(pg);
339 	u32 val;
340 
341 	if (wait_fuses) {
342 		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
343 						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
344 		/*
345 		 * For PW1 we have to wait both for the PW0/PG0 fuse state
346 		 * before enabling the power well and PW1/PG1's own fuse
347 		 * state after the enabling. For all other power wells with
348 		 * fuses we only have to wait for that PW/PG's fuse state
349 		 * after the enabling.
350 		 */
351 		if (pg == SKL_PG1)
352 			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
353 	}
354 
355 	val = I915_READ(regs->driver);
356 	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
357 	hsw_wait_for_power_well_enable(dev_priv, power_well);
358 
359 	/* Display WA #1178: cnl */
360 	if (IS_CANNONLAKE(dev_priv) &&
361 	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
362 	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
363 		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
364 		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
365 		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
366 	}
367 
368 	if (wait_fuses)
369 		gen9_wait_for_power_well_fuses(dev_priv, pg);
370 
371 	hsw_power_well_post_enable(dev_priv,
372 				   power_well->desc->hsw.irq_pipe_mask,
373 				   power_well->desc->hsw.has_vga);
374 }
375 
376 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
377 				   struct i915_power_well *power_well)
378 {
379 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
380 	int pw_idx = power_well->desc->hsw.idx;
381 	u32 val;
382 
383 	hsw_power_well_pre_disable(dev_priv,
384 				   power_well->desc->hsw.irq_pipe_mask);
385 
386 	val = I915_READ(regs->driver);
387 	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
388 	hsw_wait_for_power_well_disable(dev_priv, power_well);
389 }
390 
391 #define ICL_AUX_PW_TO_PORT(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
392 
393 static void
394 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
395 				    struct i915_power_well *power_well)
396 {
397 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
398 	int pw_idx = power_well->desc->hsw.idx;
399 	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
400 	u32 val;
401 
402 	val = I915_READ(regs->driver);
403 	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
404 
405 	val = I915_READ(ICL_PORT_CL_DW12(port));
406 	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
407 
408 	hsw_wait_for_power_well_enable(dev_priv, power_well);
409 
410 	/* Display WA #1178: icl */
411 	if (IS_ICELAKE(dev_priv) &&
412 	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
413 	    !intel_bios_is_port_edp(dev_priv, port)) {
414 		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
415 		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
416 		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
417 	}
418 }
419 
420 static void
421 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
422 				     struct i915_power_well *power_well)
423 {
424 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
425 	int pw_idx = power_well->desc->hsw.idx;
426 	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
427 	u32 val;
428 
429 	val = I915_READ(ICL_PORT_CL_DW12(port));
430 	I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
431 
432 	val = I915_READ(regs->driver);
433 	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
434 
435 	hsw_wait_for_power_well_disable(dev_priv, power_well);
436 }
437 
438 #define ICL_AUX_PW_TO_CH(pw_idx)	\
439 	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
440 
441 static void
442 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
443 				 struct i915_power_well *power_well)
444 {
445 	enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
446 	u32 val;
447 
448 	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
449 	val &= ~DP_AUX_CH_CTL_TBT_IO;
450 	if (power_well->desc->hsw.is_tc_tbt)
451 		val |= DP_AUX_CH_CTL_TBT_IO;
452 	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
453 
454 	hsw_power_well_enable(dev_priv, power_well);
455 }
456 
457 /*
458  * We should only use the power well if we explicitly asked the hardware to
459  * enable it, so check if it's enabled and also check if we've requested it to
460  * be enabled.
461  */
462 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
463 				   struct i915_power_well *power_well)
464 {
465 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
466 	enum i915_power_well_id id = power_well->desc->id;
467 	int pw_idx = power_well->desc->hsw.idx;
468 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
469 		   HSW_PWR_WELL_CTL_STATE(pw_idx);
470 	u32 val;
471 
472 	val = I915_READ(regs->driver);
473 
474 	/*
475 	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
476 	 * and the MISC_IO PW will be not restored, so check instead for the
477 	 * BIOS's own request bits, which are forced-on for these power wells
478 	 * when exiting DC5/6.
479 	 */
480 	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
481 	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
482 		val |= I915_READ(regs->bios);
483 
484 	return (val & mask) == mask;
485 }
486 
487 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
488 {
489 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
490 		  "DC9 already programmed to be enabled.\n");
491 	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
492 		  "DC5 still not disabled to enable DC9.\n");
493 	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
494 		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
495 		  "Power well 2 on.\n");
496 	WARN_ONCE(intel_irqs_enabled(dev_priv),
497 		  "Interrupts not disabled yet.\n");
498 
499 	 /*
500 	  * TODO: check for the following to verify the conditions to enter DC9
501 	  * state are satisfied:
502 	  * 1] Check relevant display engine registers to verify if mode set
503 	  * disable sequence was followed.
504 	  * 2] Check if display uninitialize sequence is initialized.
505 	  */
506 }
507 
508 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
509 {
510 	WARN_ONCE(intel_irqs_enabled(dev_priv),
511 		  "Interrupts not disabled yet.\n");
512 	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
513 		  "DC5 still not disabled.\n");
514 
515 	 /*
516 	  * TODO: check for the following to verify DC9 state was indeed
517 	  * entered before programming to disable it:
518 	  * 1] Check relevant display engine registers to verify if mode
519 	  *  set disable sequence was followed.
520 	  * 2] Check if display uninitialize sequence is initialized.
521 	  */
522 }
523 
524 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
525 				u32 state)
526 {
527 	int rewrites = 0;
528 	int rereads = 0;
529 	u32 v;
530 
531 	I915_WRITE(DC_STATE_EN, state);
532 
533 	/* It has been observed that disabling the dc6 state sometimes
534 	 * doesn't stick and dmc keeps returning old value. Make sure
535 	 * the write really sticks enough times and also force rewrite until
536 	 * we are confident that state is exactly what we want.
537 	 */
538 	do  {
539 		v = I915_READ(DC_STATE_EN);
540 
541 		if (v != state) {
542 			I915_WRITE(DC_STATE_EN, state);
543 			rewrites++;
544 			rereads = 0;
545 		} else if (rereads++ > 5) {
546 			break;
547 		}
548 
549 	} while (rewrites < 100);
550 
551 	if (v != state)
552 		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
553 			  state, v);
554 
555 	/* Most of the times we need one retry, avoid spam */
556 	if (rewrites > 1)
557 		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
558 			      state, rewrites);
559 }
560 
561 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
562 {
563 	u32 mask;
564 
565 	mask = DC_STATE_EN_UPTO_DC5;
566 	if (INTEL_GEN(dev_priv) >= 11)
567 		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
568 	else if (IS_GEN9_LP(dev_priv))
569 		mask |= DC_STATE_EN_DC9;
570 	else
571 		mask |= DC_STATE_EN_UPTO_DC6;
572 
573 	return mask;
574 }
575 
576 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
577 {
578 	u32 val;
579 
580 	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
581 
582 	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
583 		      dev_priv->csr.dc_state, val);
584 	dev_priv->csr.dc_state = val;
585 }
586 
587 /**
588  * gen9_set_dc_state - set target display C power state
589  * @dev_priv: i915 device instance
590  * @state: target DC power state
591  * - DC_STATE_DISABLE
592  * - DC_STATE_EN_UPTO_DC5
593  * - DC_STATE_EN_UPTO_DC6
594  * - DC_STATE_EN_DC9
595  *
596  * Signal to DMC firmware/HW the target DC power state passed in @state.
597  * DMC/HW can turn off individual display clocks and power rails when entering
598  * a deeper DC power state (higher in number) and turns these back when exiting
599  * that state to a shallower power state (lower in number). The HW will decide
600  * when to actually enter a given state on an on-demand basis, for instance
601  * depending on the active state of display pipes. The state of display
602  * registers backed by affected power rails are saved/restored as needed.
603  *
604  * Based on the above enabling a deeper DC power state is asynchronous wrt.
605  * enabling it. Disabling a deeper power state is synchronous: for instance
606  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
607  * back on and register state is restored. This is guaranteed by the MMIO write
608  * to DC_STATE_EN blocking until the state is restored.
609  */
610 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
611 {
612 	u32 val;
613 	u32 mask;
614 
615 	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
616 		state &= dev_priv->csr.allowed_dc_mask;
617 
618 	val = I915_READ(DC_STATE_EN);
619 	mask = gen9_dc_mask(dev_priv);
620 	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
621 		      val & mask, state);
622 
623 	/* Check if DMC is ignoring our DC state requests */
624 	if ((val & mask) != dev_priv->csr.dc_state)
625 		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
626 			  dev_priv->csr.dc_state, val & mask);
627 
628 	val &= ~mask;
629 	val |= state;
630 
631 	gen9_write_dc_state(dev_priv, val);
632 
633 	dev_priv->csr.dc_state = val & mask;
634 }
635 
636 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
637 {
638 	assert_can_enable_dc9(dev_priv);
639 
640 	DRM_DEBUG_KMS("Enabling DC9\n");
641 	/*
642 	 * Power sequencer reset is not needed on
643 	 * platforms with South Display Engine on PCH,
644 	 * because PPS registers are always on.
645 	 */
646 	if (!HAS_PCH_SPLIT(dev_priv))
647 		intel_power_sequencer_reset(dev_priv);
648 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
649 }
650 
651 void bxt_disable_dc9(struct drm_i915_private *dev_priv)
652 {
653 	assert_can_disable_dc9(dev_priv);
654 
655 	DRM_DEBUG_KMS("Disabling DC9\n");
656 
657 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
658 
659 	intel_pps_unlock_regs_wa(dev_priv);
660 }
661 
662 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
663 {
664 	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
665 		  "CSR program storage start is NULL\n");
666 	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
667 	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
668 }
669 
670 static struct i915_power_well *
671 lookup_power_well(struct drm_i915_private *dev_priv,
672 		  enum i915_power_well_id power_well_id)
673 {
674 	struct i915_power_well *power_well;
675 
676 	for_each_power_well(dev_priv, power_well)
677 		if (power_well->desc->id == power_well_id)
678 			return power_well;
679 
680 	/*
681 	 * It's not feasible to add error checking code to the callers since
682 	 * this condition really shouldn't happen and it doesn't even make sense
683 	 * to abort things like display initialization sequences. Just return
684 	 * the first power well and hope the WARN gets reported so we can fix
685 	 * our driver.
686 	 */
687 	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
688 	return &dev_priv->power_domains.power_wells[0];
689 }
690 
691 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
692 {
693 	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
694 					SKL_DISP_PW_2);
695 
696 	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
697 
698 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
699 		  "DC5 already programmed to be enabled.\n");
700 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
701 
702 	assert_csr_loaded(dev_priv);
703 }
704 
705 void gen9_enable_dc5(struct drm_i915_private *dev_priv)
706 {
707 	assert_can_enable_dc5(dev_priv);
708 
709 	DRM_DEBUG_KMS("Enabling DC5\n");
710 
711 	/* Wa Display #1183: skl,kbl,cfl */
712 	if (IS_GEN9_BC(dev_priv))
713 		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
714 			   SKL_SELECT_ALTERNATE_DC_EXIT);
715 
716 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
717 }
718 
719 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
720 {
721 	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
722 		  "Backlight is not disabled.\n");
723 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
724 		  "DC6 already programmed to be enabled.\n");
725 
726 	assert_csr_loaded(dev_priv);
727 }
728 
729 void skl_enable_dc6(struct drm_i915_private *dev_priv)
730 {
731 	assert_can_enable_dc6(dev_priv);
732 
733 	DRM_DEBUG_KMS("Enabling DC6\n");
734 
735 	/* Wa Display #1183: skl,kbl,cfl */
736 	if (IS_GEN9_BC(dev_priv))
737 		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
738 			   SKL_SELECT_ALTERNATE_DC_EXIT);
739 
740 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
741 }
742 
743 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
744 				   struct i915_power_well *power_well)
745 {
746 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
747 	int pw_idx = power_well->desc->hsw.idx;
748 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
749 	u32 bios_req = I915_READ(regs->bios);
750 
751 	/* Take over the request bit if set by BIOS. */
752 	if (bios_req & mask) {
753 		u32 drv_req = I915_READ(regs->driver);
754 
755 		if (!(drv_req & mask))
756 			I915_WRITE(regs->driver, drv_req | mask);
757 		I915_WRITE(regs->bios, bios_req & ~mask);
758 	}
759 }
760 
761 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
762 					   struct i915_power_well *power_well)
763 {
764 	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
765 }
766 
767 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
768 					    struct i915_power_well *power_well)
769 {
770 	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
771 }
772 
773 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
774 					    struct i915_power_well *power_well)
775 {
776 	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
777 }
778 
779 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
780 {
781 	struct i915_power_well *power_well;
782 
783 	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
784 	if (power_well->count > 0)
785 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
786 
787 	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
788 	if (power_well->count > 0)
789 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
790 
791 	if (IS_GEMINILAKE(dev_priv)) {
792 		power_well = lookup_power_well(dev_priv,
793 					       GLK_DISP_PW_DPIO_CMN_C);
794 		if (power_well->count > 0)
795 			bxt_ddi_phy_verify_state(dev_priv,
796 						 power_well->desc->bxt.phy);
797 	}
798 }
799 
800 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
801 					   struct i915_power_well *power_well)
802 {
803 	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
804 }
805 
806 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
807 {
808 	u32 tmp = I915_READ(DBUF_CTL);
809 
810 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
811 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
812 	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
813 }
814 
815 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
816 					  struct i915_power_well *power_well)
817 {
818 	struct intel_cdclk_state cdclk_state = {};
819 
820 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
821 
822 	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
823 	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
824 	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
825 
826 	gen9_assert_dbuf_enabled(dev_priv);
827 
828 	if (IS_GEN9_LP(dev_priv))
829 		bxt_verify_ddi_phy_power_wells(dev_priv);
830 
831 	if (INTEL_GEN(dev_priv) >= 11)
832 		/*
833 		 * DMC retains HW context only for port A, the other combo
834 		 * PHY's HW context for port B is lost after DC transitions,
835 		 * so we need to restore it manually.
836 		 */
837 		intel_combo_phy_init(dev_priv);
838 }
839 
840 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
841 					   struct i915_power_well *power_well)
842 {
843 	if (!dev_priv->csr.dmc_payload)
844 		return;
845 
846 	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
847 		skl_enable_dc6(dev_priv);
848 	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
849 		gen9_enable_dc5(dev_priv);
850 }
851 
852 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
853 					 struct i915_power_well *power_well)
854 {
855 }
856 
857 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
858 					   struct i915_power_well *power_well)
859 {
860 }
861 
862 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
863 					     struct i915_power_well *power_well)
864 {
865 	return true;
866 }
867 
868 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
869 					 struct i915_power_well *power_well)
870 {
871 	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
872 		i830_enable_pipe(dev_priv, PIPE_A);
873 	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
874 		i830_enable_pipe(dev_priv, PIPE_B);
875 }
876 
877 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
878 					  struct i915_power_well *power_well)
879 {
880 	i830_disable_pipe(dev_priv, PIPE_B);
881 	i830_disable_pipe(dev_priv, PIPE_A);
882 }
883 
884 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
885 					  struct i915_power_well *power_well)
886 {
887 	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
888 		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
889 }
890 
891 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
892 					  struct i915_power_well *power_well)
893 {
894 	if (power_well->count > 0)
895 		i830_pipes_power_well_enable(dev_priv, power_well);
896 	else
897 		i830_pipes_power_well_disable(dev_priv, power_well);
898 }
899 
900 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
901 			       struct i915_power_well *power_well, bool enable)
902 {
903 	int pw_idx = power_well->desc->vlv.idx;
904 	u32 mask;
905 	u32 state;
906 	u32 ctrl;
907 
908 	mask = PUNIT_PWRGT_MASK(pw_idx);
909 	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
910 			 PUNIT_PWRGT_PWR_GATE(pw_idx);
911 
912 	vlv_punit_get(dev_priv);
913 
914 #define COND \
915 	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
916 
917 	if (COND)
918 		goto out;
919 
920 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
921 	ctrl &= ~mask;
922 	ctrl |= state;
923 	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
924 
925 	if (wait_for(COND, 100))
926 		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
927 			  state,
928 			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
929 
930 #undef COND
931 
932 out:
933 	vlv_punit_put(dev_priv);
934 }
935 
936 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
937 				  struct i915_power_well *power_well)
938 {
939 	vlv_set_power_well(dev_priv, power_well, true);
940 }
941 
942 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
943 				   struct i915_power_well *power_well)
944 {
945 	vlv_set_power_well(dev_priv, power_well, false);
946 }
947 
948 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
949 				   struct i915_power_well *power_well)
950 {
951 	int pw_idx = power_well->desc->vlv.idx;
952 	bool enabled = false;
953 	u32 mask;
954 	u32 state;
955 	u32 ctrl;
956 
957 	mask = PUNIT_PWRGT_MASK(pw_idx);
958 	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
959 
960 	vlv_punit_get(dev_priv);
961 
962 	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
963 	/*
964 	 * We only ever set the power-on and power-gate states, anything
965 	 * else is unexpected.
966 	 */
967 	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
968 		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
969 	if (state == ctrl)
970 		enabled = true;
971 
972 	/*
973 	 * A transient state at this point would mean some unexpected party
974 	 * is poking at the power controls too.
975 	 */
976 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
977 	WARN_ON(ctrl != state);
978 
979 	vlv_punit_put(dev_priv);
980 
981 	return enabled;
982 }
983 
984 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
985 {
986 	u32 val;
987 
988 	/*
989 	 * On driver load, a pipe may be active and driving a DSI display.
990 	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
991 	 * (and never recovering) in this case. intel_dsi_post_disable() will
992 	 * clear it when we turn off the display.
993 	 */
994 	val = I915_READ(DSPCLK_GATE_D);
995 	val &= DPOUNIT_CLOCK_GATE_DISABLE;
996 	val |= VRHUNIT_CLOCK_GATE_DISABLE;
997 	I915_WRITE(DSPCLK_GATE_D, val);
998 
999 	/*
1000 	 * Disable trickle feed and enable pnd deadline calculation
1001 	 */
1002 	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1003 	I915_WRITE(CBR1_VLV, 0);
1004 
1005 	WARN_ON(dev_priv->rawclk_freq == 0);
1006 
1007 	I915_WRITE(RAWCLK_FREQ_VLV,
1008 		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
1009 }
1010 
1011 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1012 {
1013 	struct intel_encoder *encoder;
1014 	enum pipe pipe;
1015 
1016 	/*
1017 	 * Enable the CRI clock source so we can get at the
1018 	 * display and the reference clock for VGA
1019 	 * hotplug / manual detection. Supposedly DSI also
1020 	 * needs the ref clock up and running.
1021 	 *
1022 	 * CHV DPLL B/C have some issues if VGA mode is enabled.
1023 	 */
1024 	for_each_pipe(dev_priv, pipe) {
1025 		u32 val = I915_READ(DPLL(pipe));
1026 
1027 		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1028 		if (pipe != PIPE_A)
1029 			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1030 
1031 		I915_WRITE(DPLL(pipe), val);
1032 	}
1033 
1034 	vlv_init_display_clock_gating(dev_priv);
1035 
1036 	spin_lock_irq(&dev_priv->irq_lock);
1037 	valleyview_enable_display_irqs(dev_priv);
1038 	spin_unlock_irq(&dev_priv->irq_lock);
1039 
1040 	/*
1041 	 * During driver initialization/resume we can avoid restoring the
1042 	 * part of the HW/SW state that will be inited anyway explicitly.
1043 	 */
1044 	if (dev_priv->power_domains.initializing)
1045 		return;
1046 
1047 	intel_hpd_init(dev_priv);
1048 
1049 	/* Re-enable the ADPA, if we have one */
1050 	for_each_intel_encoder(&dev_priv->drm, encoder) {
1051 		if (encoder->type == INTEL_OUTPUT_ANALOG)
1052 			intel_crt_reset(&encoder->base);
1053 	}
1054 
1055 	i915_redisable_vga_power_on(dev_priv);
1056 
1057 	intel_pps_unlock_regs_wa(dev_priv);
1058 }
1059 
1060 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1061 {
1062 	spin_lock_irq(&dev_priv->irq_lock);
1063 	valleyview_disable_display_irqs(dev_priv);
1064 	spin_unlock_irq(&dev_priv->irq_lock);
1065 
1066 	/* make sure we're done processing display irqs */
1067 	synchronize_irq(dev_priv->drm.irq);
1068 
1069 	intel_power_sequencer_reset(dev_priv);
1070 
1071 	/* Prevent us from re-enabling polling on accident in late suspend */
1072 	if (!dev_priv->drm.dev->power.is_suspended)
1073 		intel_hpd_poll_init(dev_priv);
1074 }
1075 
1076 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1077 					  struct i915_power_well *power_well)
1078 {
1079 	vlv_set_power_well(dev_priv, power_well, true);
1080 
1081 	vlv_display_power_well_init(dev_priv);
1082 }
1083 
1084 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1085 					   struct i915_power_well *power_well)
1086 {
1087 	vlv_display_power_well_deinit(dev_priv);
1088 
1089 	vlv_set_power_well(dev_priv, power_well, false);
1090 }
1091 
1092 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1093 					   struct i915_power_well *power_well)
1094 {
1095 	/* since ref/cri clock was enabled */
1096 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1097 
1098 	vlv_set_power_well(dev_priv, power_well, true);
1099 
1100 	/*
1101 	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1102 	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
1103 	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
1104 	 *   b.	The other bits such as sfr settings / modesel may all
1105 	 *	be set to 0.
1106 	 *
1107 	 * This should only be done on init and resume from S3 with
1108 	 * both PLLs disabled, or we risk losing DPIO and PLL
1109 	 * synchronization.
1110 	 */
1111 	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1112 }
1113 
1114 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1115 					    struct i915_power_well *power_well)
1116 {
1117 	enum pipe pipe;
1118 
1119 	for_each_pipe(dev_priv, pipe)
1120 		assert_pll_disabled(dev_priv, pipe);
1121 
1122 	/* Assert common reset */
1123 	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
1124 
1125 	vlv_set_power_well(dev_priv, power_well, false);
1126 }
1127 
1128 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1129 
1130 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1131 
1132 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1133 {
1134 	struct i915_power_well *cmn_bc =
1135 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1136 	struct i915_power_well *cmn_d =
1137 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1138 	u32 phy_control = dev_priv->chv_phy_control;
1139 	u32 phy_status = 0;
1140 	u32 phy_status_mask = 0xffffffff;
1141 
1142 	/*
1143 	 * The BIOS can leave the PHY is some weird state
1144 	 * where it doesn't fully power down some parts.
1145 	 * Disable the asserts until the PHY has been fully
1146 	 * reset (ie. the power well has been disabled at
1147 	 * least once).
1148 	 */
1149 	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1150 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1151 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1152 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1153 				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1154 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1155 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1156 
1157 	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1158 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1159 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1160 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1161 
1162 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1163 		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1164 
1165 		/* this assumes override is only used to enable lanes */
1166 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1167 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1168 
1169 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1170 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1171 
1172 		/* CL1 is on whenever anything is on in either channel */
1173 		if (BITS_SET(phy_control,
1174 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1175 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1176 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1177 
1178 		/*
1179 		 * The DPLLB check accounts for the pipe B + port A usage
1180 		 * with CL2 powered up but all the lanes in the second channel
1181 		 * powered down.
1182 		 */
1183 		if (BITS_SET(phy_control,
1184 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1185 		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1186 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1187 
1188 		if (BITS_SET(phy_control,
1189 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1190 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1191 		if (BITS_SET(phy_control,
1192 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1193 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1194 
1195 		if (BITS_SET(phy_control,
1196 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1197 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1198 		if (BITS_SET(phy_control,
1199 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1200 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1201 	}
1202 
1203 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1204 		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1205 
1206 		/* this assumes override is only used to enable lanes */
1207 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1208 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1209 
1210 		if (BITS_SET(phy_control,
1211 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1212 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1213 
1214 		if (BITS_SET(phy_control,
1215 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1216 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1217 		if (BITS_SET(phy_control,
1218 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1219 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1220 	}
1221 
1222 	phy_status &= phy_status_mask;
1223 
1224 	/*
1225 	 * The PHY may be busy with some initial calibration and whatnot,
1226 	 * so the power state can take a while to actually change.
1227 	 */
1228 	if (intel_wait_for_register(&dev_priv->uncore,
1229 				    DISPLAY_PHY_STATUS,
1230 				    phy_status_mask,
1231 				    phy_status,
1232 				    10))
1233 		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1234 			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1235 			   phy_status, dev_priv->chv_phy_control);
1236 }
1237 
1238 #undef BITS_SET
1239 
1240 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1241 					   struct i915_power_well *power_well)
1242 {
1243 	enum dpio_phy phy;
1244 	enum pipe pipe;
1245 	u32 tmp;
1246 
1247 	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1248 		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1249 
1250 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1251 		pipe = PIPE_A;
1252 		phy = DPIO_PHY0;
1253 	} else {
1254 		pipe = PIPE_C;
1255 		phy = DPIO_PHY1;
1256 	}
1257 
1258 	/* since ref/cri clock was enabled */
1259 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1260 	vlv_set_power_well(dev_priv, power_well, true);
1261 
1262 	/* Poll for phypwrgood signal */
1263 	if (intel_wait_for_register(&dev_priv->uncore,
1264 				    DISPLAY_PHY_STATUS,
1265 				    PHY_POWERGOOD(phy),
1266 				    PHY_POWERGOOD(phy),
1267 				    1))
1268 		DRM_ERROR("Display PHY %d is not power up\n", phy);
1269 
1270 	vlv_dpio_get(dev_priv);
1271 
1272 	/* Enable dynamic power down */
1273 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1274 	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1275 		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1276 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1277 
1278 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1279 		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1280 		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1281 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1282 	} else {
1283 		/*
1284 		 * Force the non-existing CL2 off. BXT does this
1285 		 * too, so maybe it saves some power even though
1286 		 * CL2 doesn't exist?
1287 		 */
1288 		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1289 		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1290 		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1291 	}
1292 
1293 	vlv_dpio_put(dev_priv);
1294 
1295 	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1296 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1297 
1298 	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1299 		      phy, dev_priv->chv_phy_control);
1300 
1301 	assert_chv_phy_status(dev_priv);
1302 }
1303 
1304 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1305 					    struct i915_power_well *power_well)
1306 {
1307 	enum dpio_phy phy;
1308 
1309 	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1310 		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1311 
1312 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1313 		phy = DPIO_PHY0;
1314 		assert_pll_disabled(dev_priv, PIPE_A);
1315 		assert_pll_disabled(dev_priv, PIPE_B);
1316 	} else {
1317 		phy = DPIO_PHY1;
1318 		assert_pll_disabled(dev_priv, PIPE_C);
1319 	}
1320 
1321 	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1322 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1323 
1324 	vlv_set_power_well(dev_priv, power_well, false);
1325 
1326 	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1327 		      phy, dev_priv->chv_phy_control);
1328 
1329 	/* PHY is fully reset now, so we can enable the PHY state asserts */
1330 	dev_priv->chv_phy_assert[phy] = true;
1331 
1332 	assert_chv_phy_status(dev_priv);
1333 }
1334 
1335 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1336 				     enum dpio_channel ch, bool override, unsigned int mask)
1337 {
1338 	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1339 	u32 reg, val, expected, actual;
1340 
1341 	/*
1342 	 * The BIOS can leave the PHY is some weird state
1343 	 * where it doesn't fully power down some parts.
1344 	 * Disable the asserts until the PHY has been fully
1345 	 * reset (ie. the power well has been disabled at
1346 	 * least once).
1347 	 */
1348 	if (!dev_priv->chv_phy_assert[phy])
1349 		return;
1350 
1351 	if (ch == DPIO_CH0)
1352 		reg = _CHV_CMN_DW0_CH0;
1353 	else
1354 		reg = _CHV_CMN_DW6_CH1;
1355 
1356 	vlv_dpio_get(dev_priv);
1357 	val = vlv_dpio_read(dev_priv, pipe, reg);
1358 	vlv_dpio_put(dev_priv);
1359 
1360 	/*
1361 	 * This assumes !override is only used when the port is disabled.
1362 	 * All lanes should power down even without the override when
1363 	 * the port is disabled.
1364 	 */
1365 	if (!override || mask == 0xf) {
1366 		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1367 		/*
1368 		 * If CH1 common lane is not active anymore
1369 		 * (eg. for pipe B DPLL) the entire channel will
1370 		 * shut down, which causes the common lane registers
1371 		 * to read as 0. That means we can't actually check
1372 		 * the lane power down status bits, but as the entire
1373 		 * register reads as 0 it's a good indication that the
1374 		 * channel is indeed entirely powered down.
1375 		 */
1376 		if (ch == DPIO_CH1 && val == 0)
1377 			expected = 0;
1378 	} else if (mask != 0x0) {
1379 		expected = DPIO_ANYDL_POWERDOWN;
1380 	} else {
1381 		expected = 0;
1382 	}
1383 
1384 	if (ch == DPIO_CH0)
1385 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1386 	else
1387 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1388 	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1389 
1390 	WARN(actual != expected,
1391 	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1392 	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1393 	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1394 	     reg, val);
1395 }
1396 
1397 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1398 			  enum dpio_channel ch, bool override)
1399 {
1400 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1401 	bool was_override;
1402 
1403 	mutex_lock(&power_domains->lock);
1404 
1405 	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1406 
1407 	if (override == was_override)
1408 		goto out;
1409 
1410 	if (override)
1411 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1412 	else
1413 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1414 
1415 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1416 
1417 	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1418 		      phy, ch, dev_priv->chv_phy_control);
1419 
1420 	assert_chv_phy_status(dev_priv);
1421 
1422 out:
1423 	mutex_unlock(&power_domains->lock);
1424 
1425 	return was_override;
1426 }
1427 
1428 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1429 			     bool override, unsigned int mask)
1430 {
1431 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1432 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1433 	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1434 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1435 
1436 	mutex_lock(&power_domains->lock);
1437 
1438 	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1439 	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1440 
1441 	if (override)
1442 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1443 	else
1444 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1445 
1446 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1447 
1448 	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1449 		      phy, ch, mask, dev_priv->chv_phy_control);
1450 
1451 	assert_chv_phy_status(dev_priv);
1452 
1453 	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1454 
1455 	mutex_unlock(&power_domains->lock);
1456 }
1457 
1458 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1459 					struct i915_power_well *power_well)
1460 {
1461 	enum pipe pipe = PIPE_A;
1462 	bool enabled;
1463 	u32 state, ctrl;
1464 
1465 	vlv_punit_get(dev_priv);
1466 
1467 	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1468 	/*
1469 	 * We only ever set the power-on and power-gate states, anything
1470 	 * else is unexpected.
1471 	 */
1472 	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1473 	enabled = state == DP_SSS_PWR_ON(pipe);
1474 
1475 	/*
1476 	 * A transient state at this point would mean some unexpected party
1477 	 * is poking at the power controls too.
1478 	 */
1479 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1480 	WARN_ON(ctrl << 16 != state);
1481 
1482 	vlv_punit_put(dev_priv);
1483 
1484 	return enabled;
1485 }
1486 
1487 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1488 				    struct i915_power_well *power_well,
1489 				    bool enable)
1490 {
1491 	enum pipe pipe = PIPE_A;
1492 	u32 state;
1493 	u32 ctrl;
1494 
1495 	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1496 
1497 	vlv_punit_get(dev_priv);
1498 
1499 #define COND \
1500 	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1501 
1502 	if (COND)
1503 		goto out;
1504 
1505 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1506 	ctrl &= ~DP_SSC_MASK(pipe);
1507 	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1508 	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1509 
1510 	if (wait_for(COND, 100))
1511 		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1512 			  state,
1513 			  vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1514 
1515 #undef COND
1516 
1517 out:
1518 	vlv_punit_put(dev_priv);
1519 }
1520 
1521 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1522 				       struct i915_power_well *power_well)
1523 {
1524 	chv_set_pipe_power_well(dev_priv, power_well, true);
1525 
1526 	vlv_display_power_well_init(dev_priv);
1527 }
1528 
1529 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1530 					struct i915_power_well *power_well)
1531 {
1532 	vlv_display_power_well_deinit(dev_priv);
1533 
1534 	chv_set_pipe_power_well(dev_priv, power_well, false);
1535 }
1536 
1537 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1538 {
1539 	return power_domains->async_put_domains[0] |
1540 	       power_domains->async_put_domains[1];
1541 }
1542 
1543 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1544 
1545 static bool
1546 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1547 {
1548 	return !WARN_ON(power_domains->async_put_domains[0] &
1549 			power_domains->async_put_domains[1]);
1550 }
1551 
1552 static bool
1553 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1554 {
1555 	enum intel_display_power_domain domain;
1556 	bool err = false;
1557 
1558 	err |= !assert_async_put_domain_masks_disjoint(power_domains);
1559 	err |= WARN_ON(!!power_domains->async_put_wakeref !=
1560 		       !!__async_put_domains_mask(power_domains));
1561 
1562 	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1563 		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
1564 
1565 	return !err;
1566 }
1567 
1568 static void print_power_domains(struct i915_power_domains *power_domains,
1569 				const char *prefix, u64 mask)
1570 {
1571 	enum intel_display_power_domain domain;
1572 
1573 	DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
1574 	for_each_power_domain(domain, mask)
1575 		DRM_DEBUG_DRIVER("%s use_count %d\n",
1576 				 intel_display_power_domain_str(domain),
1577 				 power_domains->domain_use_count[domain]);
1578 }
1579 
1580 static void
1581 print_async_put_domains_state(struct i915_power_domains *power_domains)
1582 {
1583 	DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
1584 			 power_domains->async_put_wakeref);
1585 
1586 	print_power_domains(power_domains, "async_put_domains[0]",
1587 			    power_domains->async_put_domains[0]);
1588 	print_power_domains(power_domains, "async_put_domains[1]",
1589 			    power_domains->async_put_domains[1]);
1590 }
1591 
1592 static void
1593 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1594 {
1595 	if (!__async_put_domains_state_ok(power_domains))
1596 		print_async_put_domains_state(power_domains);
1597 }
1598 
1599 #else
1600 
1601 static void
1602 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1603 {
1604 }
1605 
1606 static void
1607 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1608 {
1609 }
1610 
1611 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
1612 
1613 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
1614 {
1615 	assert_async_put_domain_masks_disjoint(power_domains);
1616 
1617 	return __async_put_domains_mask(power_domains);
1618 }
1619 
1620 static void
1621 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
1622 			       enum intel_display_power_domain domain)
1623 {
1624 	assert_async_put_domain_masks_disjoint(power_domains);
1625 
1626 	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
1627 	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
1628 }
1629 
1630 static bool
1631 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
1632 				       enum intel_display_power_domain domain)
1633 {
1634 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1635 	bool ret = false;
1636 
1637 	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
1638 		goto out_verify;
1639 
1640 	async_put_domains_clear_domain(power_domains, domain);
1641 
1642 	ret = true;
1643 
1644 	if (async_put_domains_mask(power_domains))
1645 		goto out_verify;
1646 
1647 	cancel_delayed_work(&power_domains->async_put_work);
1648 	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
1649 				 fetch_and_zero(&power_domains->async_put_wakeref));
1650 out_verify:
1651 	verify_async_put_domains_state(power_domains);
1652 
1653 	return ret;
1654 }
1655 
1656 static void
1657 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1658 				 enum intel_display_power_domain domain)
1659 {
1660 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1661 	struct i915_power_well *power_well;
1662 
1663 	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
1664 		return;
1665 
1666 	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1667 		intel_power_well_get(dev_priv, power_well);
1668 
1669 	power_domains->domain_use_count[domain]++;
1670 }
1671 
1672 /**
1673  * intel_display_power_get - grab a power domain reference
1674  * @dev_priv: i915 device instance
1675  * @domain: power domain to reference
1676  *
1677  * This function grabs a power domain reference for @domain and ensures that the
1678  * power domain and all its parents are powered up. Therefore users should only
1679  * grab a reference to the innermost power domain they need.
1680  *
1681  * Any power domain reference obtained by this function must have a symmetric
1682  * call to intel_display_power_put() to release the reference again.
1683  */
1684 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1685 					enum intel_display_power_domain domain)
1686 {
1687 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1688 	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1689 
1690 	mutex_lock(&power_domains->lock);
1691 	__intel_display_power_get_domain(dev_priv, domain);
1692 	mutex_unlock(&power_domains->lock);
1693 
1694 	return wakeref;
1695 }
1696 
1697 /**
1698  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1699  * @dev_priv: i915 device instance
1700  * @domain: power domain to reference
1701  *
1702  * This function grabs a power domain reference for @domain and ensures that the
1703  * power domain and all its parents are powered up. Therefore users should only
1704  * grab a reference to the innermost power domain they need.
1705  *
1706  * Any power domain reference obtained by this function must have a symmetric
1707  * call to intel_display_power_put() to release the reference again.
1708  */
1709 intel_wakeref_t
1710 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1711 				   enum intel_display_power_domain domain)
1712 {
1713 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1714 	intel_wakeref_t wakeref;
1715 	bool is_enabled;
1716 
1717 	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
1718 	if (!wakeref)
1719 		return false;
1720 
1721 	mutex_lock(&power_domains->lock);
1722 
1723 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1724 		__intel_display_power_get_domain(dev_priv, domain);
1725 		is_enabled = true;
1726 	} else {
1727 		is_enabled = false;
1728 	}
1729 
1730 	mutex_unlock(&power_domains->lock);
1731 
1732 	if (!is_enabled) {
1733 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1734 		wakeref = 0;
1735 	}
1736 
1737 	return wakeref;
1738 }
1739 
1740 static void
1741 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
1742 				 enum intel_display_power_domain domain)
1743 {
1744 	struct i915_power_domains *power_domains;
1745 	struct i915_power_well *power_well;
1746 	const char *name = intel_display_power_domain_str(domain);
1747 
1748 	power_domains = &dev_priv->power_domains;
1749 
1750 	WARN(!power_domains->domain_use_count[domain],
1751 	     "Use count on domain %s is already zero\n",
1752 	     name);
1753 	WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
1754 	     "Async disabling of domain %s is pending\n",
1755 	     name);
1756 
1757 	power_domains->domain_use_count[domain]--;
1758 
1759 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
1760 		intel_power_well_put(dev_priv, power_well);
1761 }
1762 
1763 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
1764 				      enum intel_display_power_domain domain)
1765 {
1766 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1767 
1768 	mutex_lock(&power_domains->lock);
1769 	__intel_display_power_put_domain(dev_priv, domain);
1770 	mutex_unlock(&power_domains->lock);
1771 }
1772 
1773 /**
1774  * intel_display_power_put_unchecked - release an unchecked power domain reference
1775  * @dev_priv: i915 device instance
1776  * @domain: power domain to reference
1777  *
1778  * This function drops the power domain reference obtained by
1779  * intel_display_power_get() and might power down the corresponding hardware
1780  * block right away if this is the last reference.
1781  *
1782  * This function exists only for historical reasons and should be avoided in
1783  * new code, as the correctness of its use cannot be checked. Always use
1784  * intel_display_power_put() instead.
1785  */
1786 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
1787 				       enum intel_display_power_domain domain)
1788 {
1789 	__intel_display_power_put(dev_priv, domain);
1790 	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
1791 }
1792 
1793 static void
1794 queue_async_put_domains_work(struct i915_power_domains *power_domains,
1795 			     intel_wakeref_t wakeref)
1796 {
1797 	WARN_ON(power_domains->async_put_wakeref);
1798 	power_domains->async_put_wakeref = wakeref;
1799 	WARN_ON(!queue_delayed_work(system_unbound_wq,
1800 				    &power_domains->async_put_work,
1801 				    msecs_to_jiffies(100)));
1802 }
1803 
1804 static void
1805 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
1806 {
1807 	struct drm_i915_private *dev_priv =
1808 		container_of(power_domains, struct drm_i915_private,
1809 			     power_domains);
1810 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1811 	enum intel_display_power_domain domain;
1812 	intel_wakeref_t wakeref;
1813 
1814 	/*
1815 	 * The caller must hold already raw wakeref, upgrade that to a proper
1816 	 * wakeref to make the state checker happy about the HW access during
1817 	 * power well disabling.
1818 	 */
1819 	assert_rpm_raw_wakeref_held(rpm);
1820 	wakeref = intel_runtime_pm_get(rpm);
1821 
1822 	for_each_power_domain(domain, mask) {
1823 		/* Clear before put, so put's sanity check is happy. */
1824 		async_put_domains_clear_domain(power_domains, domain);
1825 		__intel_display_power_put_domain(dev_priv, domain);
1826 	}
1827 
1828 	intel_runtime_pm_put(rpm, wakeref);
1829 }
1830 
1831 static void
1832 intel_display_power_put_async_work(struct work_struct *work)
1833 {
1834 	struct drm_i915_private *dev_priv =
1835 		container_of(work, struct drm_i915_private,
1836 			     power_domains.async_put_work.work);
1837 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1838 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1839 	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
1840 	intel_wakeref_t old_work_wakeref = 0;
1841 
1842 	mutex_lock(&power_domains->lock);
1843 
1844 	/*
1845 	 * Bail out if all the domain refs pending to be released were grabbed
1846 	 * by subsequent gets or a flush_work.
1847 	 */
1848 	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
1849 	if (!old_work_wakeref)
1850 		goto out_verify;
1851 
1852 	release_async_put_domains(power_domains,
1853 				  power_domains->async_put_domains[0]);
1854 
1855 	/* Requeue the work if more domains were async put meanwhile. */
1856 	if (power_domains->async_put_domains[1]) {
1857 		power_domains->async_put_domains[0] =
1858 			fetch_and_zero(&power_domains->async_put_domains[1]);
1859 		queue_async_put_domains_work(power_domains,
1860 					     fetch_and_zero(&new_work_wakeref));
1861 	}
1862 
1863 out_verify:
1864 	verify_async_put_domains_state(power_domains);
1865 
1866 	mutex_unlock(&power_domains->lock);
1867 
1868 	if (old_work_wakeref)
1869 		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
1870 	if (new_work_wakeref)
1871 		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
1872 }
1873 
1874 /**
1875  * intel_display_power_put_async - release a power domain reference asynchronously
1876  * @i915: i915 device instance
1877  * @domain: power domain to reference
1878  * @wakeref: wakeref acquired for the reference that is being released
1879  *
1880  * This function drops the power domain reference obtained by
1881  * intel_display_power_get*() and schedules a work to power down the
1882  * corresponding hardware block if this is the last reference.
1883  */
1884 void __intel_display_power_put_async(struct drm_i915_private *i915,
1885 				     enum intel_display_power_domain domain,
1886 				     intel_wakeref_t wakeref)
1887 {
1888 	struct i915_power_domains *power_domains = &i915->power_domains;
1889 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
1890 	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
1891 
1892 	mutex_lock(&power_domains->lock);
1893 
1894 	if (power_domains->domain_use_count[domain] > 1) {
1895 		__intel_display_power_put_domain(i915, domain);
1896 
1897 		goto out_verify;
1898 	}
1899 
1900 	WARN_ON(power_domains->domain_use_count[domain] != 1);
1901 
1902 	/* Let a pending work requeue itself or queue a new one. */
1903 	if (power_domains->async_put_wakeref) {
1904 		power_domains->async_put_domains[1] |= BIT_ULL(domain);
1905 	} else {
1906 		power_domains->async_put_domains[0] |= BIT_ULL(domain);
1907 		queue_async_put_domains_work(power_domains,
1908 					     fetch_and_zero(&work_wakeref));
1909 	}
1910 
1911 out_verify:
1912 	verify_async_put_domains_state(power_domains);
1913 
1914 	mutex_unlock(&power_domains->lock);
1915 
1916 	if (work_wakeref)
1917 		intel_runtime_pm_put_raw(rpm, work_wakeref);
1918 
1919 	intel_runtime_pm_put(rpm, wakeref);
1920 }
1921 
1922 /**
1923  * intel_display_power_flush_work - flushes the async display power disabling work
1924  * @i915: i915 device instance
1925  *
1926  * Flushes any pending work that was scheduled by a preceding
1927  * intel_display_power_put_async() call, completing the disabling of the
1928  * corresponding power domains.
1929  *
1930  * Note that the work handler function may still be running after this
1931  * function returns; to ensure that the work handler isn't running use
1932  * intel_display_power_flush_work_sync() instead.
1933  */
1934 void intel_display_power_flush_work(struct drm_i915_private *i915)
1935 {
1936 	struct i915_power_domains *power_domains = &i915->power_domains;
1937 	intel_wakeref_t work_wakeref;
1938 
1939 	mutex_lock(&power_domains->lock);
1940 
1941 	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
1942 	if (!work_wakeref)
1943 		goto out_verify;
1944 
1945 	release_async_put_domains(power_domains,
1946 				  async_put_domains_mask(power_domains));
1947 	cancel_delayed_work(&power_domains->async_put_work);
1948 
1949 out_verify:
1950 	verify_async_put_domains_state(power_domains);
1951 
1952 	mutex_unlock(&power_domains->lock);
1953 
1954 	if (work_wakeref)
1955 		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
1956 }
1957 
1958 /**
1959  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
1960  * @i915: i915 device instance
1961  *
1962  * Like intel_display_power_flush_work(), but also ensure that the work
1963  * handler function is not running any more when this function returns.
1964  */
1965 static void
1966 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
1967 {
1968 	struct i915_power_domains *power_domains = &i915->power_domains;
1969 
1970 	intel_display_power_flush_work(i915);
1971 	cancel_delayed_work_sync(&power_domains->async_put_work);
1972 
1973 	verify_async_put_domains_state(power_domains);
1974 
1975 	WARN_ON(power_domains->async_put_wakeref);
1976 }
1977 
1978 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1979 /**
1980  * intel_display_power_put - release a power domain reference
1981  * @dev_priv: i915 device instance
1982  * @domain: power domain to reference
1983  * @wakeref: wakeref acquired for the reference that is being released
1984  *
1985  * This function drops the power domain reference obtained by
1986  * intel_display_power_get() and might power down the corresponding hardware
1987  * block right away if this is the last reference.
1988  */
1989 void intel_display_power_put(struct drm_i915_private *dev_priv,
1990 			     enum intel_display_power_domain domain,
1991 			     intel_wakeref_t wakeref)
1992 {
1993 	__intel_display_power_put(dev_priv, domain);
1994 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1995 }
1996 #endif
1997 
1998 #define I830_PIPES_POWER_DOMAINS (		\
1999 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2000 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2001 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2002 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2003 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2004 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2005 	BIT_ULL(POWER_DOMAIN_INIT))
2006 
2007 #define VLV_DISPLAY_POWER_DOMAINS (		\
2008 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2009 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2010 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2011 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2012 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2013 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2014 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2015 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2016 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2017 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2018 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2019 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2020 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2021 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2022 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2023 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2024 	BIT_ULL(POWER_DOMAIN_INIT))
2025 
2026 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
2027 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2028 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2029 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2030 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2031 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2032 	BIT_ULL(POWER_DOMAIN_INIT))
2033 
2034 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
2035 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2036 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2037 	BIT_ULL(POWER_DOMAIN_INIT))
2038 
2039 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
2040 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2041 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2042 	BIT_ULL(POWER_DOMAIN_INIT))
2043 
2044 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
2045 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2046 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2047 	BIT_ULL(POWER_DOMAIN_INIT))
2048 
2049 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
2050 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2051 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2052 	BIT_ULL(POWER_DOMAIN_INIT))
2053 
2054 #define CHV_DISPLAY_POWER_DOMAINS (		\
2055 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2056 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2057 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2058 	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
2059 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2060 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2061 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2062 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2063 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2064 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
2065 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2066 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2067 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2068 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2069 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2070 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2071 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2072 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2073 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2074 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2075 	BIT_ULL(POWER_DOMAIN_INIT))
2076 
2077 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
2078 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2079 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2080 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2081 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2082 	BIT_ULL(POWER_DOMAIN_INIT))
2083 
2084 #define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
2085 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2086 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2087 	BIT_ULL(POWER_DOMAIN_INIT))
2088 
2089 #define HSW_DISPLAY_POWER_DOMAINS (			\
2090 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2091 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2092 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
2093 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2094 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2095 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2096 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2097 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2098 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2099 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2100 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2101 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2102 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2103 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2104 	BIT_ULL(POWER_DOMAIN_INIT))
2105 
2106 #define BDW_DISPLAY_POWER_DOMAINS (			\
2107 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2108 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2109 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2110 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2111 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2112 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2113 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2114 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2115 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2116 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2117 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2118 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2119 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2120 	BIT_ULL(POWER_DOMAIN_INIT))
2121 
2122 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2123 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2124 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2125 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2126 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2127 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2128 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2129 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2130 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2131 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2132 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2133 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
2134 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2135 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2136 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2137 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2138 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2139 	BIT_ULL(POWER_DOMAIN_INIT))
2140 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
2141 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2142 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2143 	BIT_ULL(POWER_DOMAIN_INIT))
2144 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2145 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2146 	BIT_ULL(POWER_DOMAIN_INIT))
2147 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2148 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2149 	BIT_ULL(POWER_DOMAIN_INIT))
2150 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
2151 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2152 	BIT_ULL(POWER_DOMAIN_INIT))
2153 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2154 	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2155 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2156 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2157 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2158 	BIT_ULL(POWER_DOMAIN_INIT))
2159 
2160 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2161 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2162 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2163 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2164 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2165 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2166 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2167 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2168 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2169 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2170 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2171 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2172 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2173 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2174 	BIT_ULL(POWER_DOMAIN_INIT))
2175 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2176 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2177 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2178 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2179 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2180 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2181 	BIT_ULL(POWER_DOMAIN_INIT))
2182 #define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
2183 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2184 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2185 	BIT_ULL(POWER_DOMAIN_INIT))
2186 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
2187 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2188 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2189 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2190 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2191 	BIT_ULL(POWER_DOMAIN_INIT))
2192 
2193 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2194 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2195 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2196 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2197 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2198 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2199 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2200 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2201 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2202 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2203 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2204 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2205 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2206 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2207 	BIT_ULL(POWER_DOMAIN_INIT))
2208 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
2209 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2210 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2211 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2212 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2213 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2214 #define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
2215 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2216 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2217 	BIT_ULL(POWER_DOMAIN_INIT))
2218 #define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
2219 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2220 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2221 	BIT_ULL(POWER_DOMAIN_INIT))
2222 #define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
2223 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2224 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2225 	BIT_ULL(POWER_DOMAIN_INIT))
2226 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
2227 	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
2228 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2229 	BIT_ULL(POWER_DOMAIN_INIT))
2230 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
2231 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2232 	BIT_ULL(POWER_DOMAIN_INIT))
2233 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
2234 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2235 	BIT_ULL(POWER_DOMAIN_INIT))
2236 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2237 	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2238 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2239 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2240 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2241 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2242 	BIT_ULL(POWER_DOMAIN_INIT))
2243 
2244 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2245 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2246 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2247 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2248 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2249 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2250 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2251 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2252 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2253 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2254 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2255 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
2256 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2257 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2258 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2259 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2260 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2261 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2262 	BIT_ULL(POWER_DOMAIN_INIT))
2263 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
2264 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2265 	BIT_ULL(POWER_DOMAIN_INIT))
2266 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
2267 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2268 	BIT_ULL(POWER_DOMAIN_INIT))
2269 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
2270 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2271 	BIT_ULL(POWER_DOMAIN_INIT))
2272 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
2273 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2274 	BIT_ULL(POWER_DOMAIN_INIT))
2275 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
2276 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2277 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2278 	BIT_ULL(POWER_DOMAIN_INIT))
2279 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
2280 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2281 	BIT_ULL(POWER_DOMAIN_INIT))
2282 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
2283 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2284 	BIT_ULL(POWER_DOMAIN_INIT))
2285 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
2286 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2287 	BIT_ULL(POWER_DOMAIN_INIT))
2288 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
2289 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2290 	BIT_ULL(POWER_DOMAIN_INIT))
2291 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
2292 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2293 	BIT_ULL(POWER_DOMAIN_INIT))
2294 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2295 	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2296 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2297 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2298 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2299 	BIT_ULL(POWER_DOMAIN_INIT))
2300 
2301 /*
2302  * ICL PW_0/PG_0 domains (HW/DMC control):
2303  * - PCI
2304  * - clocks except port PLL
2305  * - central power except FBC
2306  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2307  * ICL PW_1/PG_1 domains (HW/DMC control):
2308  * - DBUF function
2309  * - PIPE_A and its planes, except VGA
2310  * - transcoder EDP + PSR
2311  * - transcoder DSI
2312  * - DDI_A
2313  * - FBC
2314  */
2315 #define ICL_PW_4_POWER_DOMAINS (			\
2316 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2317 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2318 	BIT_ULL(POWER_DOMAIN_INIT))
2319 	/* VDSC/joining */
2320 #define ICL_PW_3_POWER_DOMAINS (			\
2321 	ICL_PW_4_POWER_DOMAINS |			\
2322 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2323 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2324 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2325 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2326 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2327 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2328 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2329 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2330 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2331 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2332 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2333 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2334 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2335 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2336 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2337 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2338 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2339 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2340 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2341 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2342 	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
2343 	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
2344 	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
2345 	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
2346 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2347 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2348 	BIT_ULL(POWER_DOMAIN_INIT))
2349 	/*
2350 	 * - transcoder WD
2351 	 * - KVMR (HW control)
2352 	 */
2353 #define ICL_PW_2_POWER_DOMAINS (			\
2354 	ICL_PW_3_POWER_DOMAINS |			\
2355 	BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |		\
2356 	BIT_ULL(POWER_DOMAIN_INIT))
2357 	/*
2358 	 * - KVMR (HW control)
2359 	 */
2360 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2361 	ICL_PW_2_POWER_DOMAINS |			\
2362 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2363 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2364 	BIT_ULL(POWER_DOMAIN_INIT))
2365 
2366 #define ICL_DDI_IO_A_POWER_DOMAINS (			\
2367 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2368 #define ICL_DDI_IO_B_POWER_DOMAINS (			\
2369 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2370 #define ICL_DDI_IO_C_POWER_DOMAINS (			\
2371 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2372 #define ICL_DDI_IO_D_POWER_DOMAINS (			\
2373 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2374 #define ICL_DDI_IO_E_POWER_DOMAINS (			\
2375 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2376 #define ICL_DDI_IO_F_POWER_DOMAINS (			\
2377 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2378 
2379 #define ICL_AUX_A_IO_POWER_DOMAINS (			\
2380 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2381 	BIT_ULL(POWER_DOMAIN_AUX_A))
2382 #define ICL_AUX_B_IO_POWER_DOMAINS (			\
2383 	BIT_ULL(POWER_DOMAIN_AUX_B))
2384 #define ICL_AUX_C_IO_POWER_DOMAINS (			\
2385 	BIT_ULL(POWER_DOMAIN_AUX_C))
2386 #define ICL_AUX_D_IO_POWER_DOMAINS (			\
2387 	BIT_ULL(POWER_DOMAIN_AUX_D))
2388 #define ICL_AUX_E_IO_POWER_DOMAINS (			\
2389 	BIT_ULL(POWER_DOMAIN_AUX_E))
2390 #define ICL_AUX_F_IO_POWER_DOMAINS (			\
2391 	BIT_ULL(POWER_DOMAIN_AUX_F))
2392 #define ICL_AUX_TBT1_IO_POWER_DOMAINS (			\
2393 	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
2394 #define ICL_AUX_TBT2_IO_POWER_DOMAINS (			\
2395 	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
2396 #define ICL_AUX_TBT3_IO_POWER_DOMAINS (			\
2397 	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
2398 #define ICL_AUX_TBT4_IO_POWER_DOMAINS (			\
2399 	BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2400 
2401 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2402 	.sync_hw = i9xx_power_well_sync_hw_noop,
2403 	.enable = i9xx_always_on_power_well_noop,
2404 	.disable = i9xx_always_on_power_well_noop,
2405 	.is_enabled = i9xx_always_on_power_well_enabled,
2406 };
2407 
2408 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2409 	.sync_hw = i9xx_power_well_sync_hw_noop,
2410 	.enable = chv_pipe_power_well_enable,
2411 	.disable = chv_pipe_power_well_disable,
2412 	.is_enabled = chv_pipe_power_well_enabled,
2413 };
2414 
2415 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2416 	.sync_hw = i9xx_power_well_sync_hw_noop,
2417 	.enable = chv_dpio_cmn_power_well_enable,
2418 	.disable = chv_dpio_cmn_power_well_disable,
2419 	.is_enabled = vlv_power_well_enabled,
2420 };
2421 
2422 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2423 	{
2424 		.name = "always-on",
2425 		.always_on = true,
2426 		.domains = POWER_DOMAIN_MASK,
2427 		.ops = &i9xx_always_on_power_well_ops,
2428 		.id = DISP_PW_ID_NONE,
2429 	},
2430 };
2431 
2432 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2433 	.sync_hw = i830_pipes_power_well_sync_hw,
2434 	.enable = i830_pipes_power_well_enable,
2435 	.disable = i830_pipes_power_well_disable,
2436 	.is_enabled = i830_pipes_power_well_enabled,
2437 };
2438 
2439 static const struct i915_power_well_desc i830_power_wells[] = {
2440 	{
2441 		.name = "always-on",
2442 		.always_on = true,
2443 		.domains = POWER_DOMAIN_MASK,
2444 		.ops = &i9xx_always_on_power_well_ops,
2445 		.id = DISP_PW_ID_NONE,
2446 	},
2447 	{
2448 		.name = "pipes",
2449 		.domains = I830_PIPES_POWER_DOMAINS,
2450 		.ops = &i830_pipes_power_well_ops,
2451 		.id = DISP_PW_ID_NONE,
2452 	},
2453 };
2454 
2455 static const struct i915_power_well_ops hsw_power_well_ops = {
2456 	.sync_hw = hsw_power_well_sync_hw,
2457 	.enable = hsw_power_well_enable,
2458 	.disable = hsw_power_well_disable,
2459 	.is_enabled = hsw_power_well_enabled,
2460 };
2461 
2462 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2463 	.sync_hw = i9xx_power_well_sync_hw_noop,
2464 	.enable = gen9_dc_off_power_well_enable,
2465 	.disable = gen9_dc_off_power_well_disable,
2466 	.is_enabled = gen9_dc_off_power_well_enabled,
2467 };
2468 
2469 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2470 	.sync_hw = i9xx_power_well_sync_hw_noop,
2471 	.enable = bxt_dpio_cmn_power_well_enable,
2472 	.disable = bxt_dpio_cmn_power_well_disable,
2473 	.is_enabled = bxt_dpio_cmn_power_well_enabled,
2474 };
2475 
2476 static const struct i915_power_well_regs hsw_power_well_regs = {
2477 	.bios	= HSW_PWR_WELL_CTL1,
2478 	.driver	= HSW_PWR_WELL_CTL2,
2479 	.kvmr	= HSW_PWR_WELL_CTL3,
2480 	.debug	= HSW_PWR_WELL_CTL4,
2481 };
2482 
2483 static const struct i915_power_well_desc hsw_power_wells[] = {
2484 	{
2485 		.name = "always-on",
2486 		.always_on = true,
2487 		.domains = POWER_DOMAIN_MASK,
2488 		.ops = &i9xx_always_on_power_well_ops,
2489 		.id = DISP_PW_ID_NONE,
2490 	},
2491 	{
2492 		.name = "display",
2493 		.domains = HSW_DISPLAY_POWER_DOMAINS,
2494 		.ops = &hsw_power_well_ops,
2495 		.id = HSW_DISP_PW_GLOBAL,
2496 		{
2497 			.hsw.regs = &hsw_power_well_regs,
2498 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2499 			.hsw.has_vga = true,
2500 		},
2501 	},
2502 };
2503 
2504 static const struct i915_power_well_desc bdw_power_wells[] = {
2505 	{
2506 		.name = "always-on",
2507 		.always_on = true,
2508 		.domains = POWER_DOMAIN_MASK,
2509 		.ops = &i9xx_always_on_power_well_ops,
2510 		.id = DISP_PW_ID_NONE,
2511 	},
2512 	{
2513 		.name = "display",
2514 		.domains = BDW_DISPLAY_POWER_DOMAINS,
2515 		.ops = &hsw_power_well_ops,
2516 		.id = HSW_DISP_PW_GLOBAL,
2517 		{
2518 			.hsw.regs = &hsw_power_well_regs,
2519 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2520 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2521 			.hsw.has_vga = true,
2522 		},
2523 	},
2524 };
2525 
2526 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2527 	.sync_hw = i9xx_power_well_sync_hw_noop,
2528 	.enable = vlv_display_power_well_enable,
2529 	.disable = vlv_display_power_well_disable,
2530 	.is_enabled = vlv_power_well_enabled,
2531 };
2532 
2533 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2534 	.sync_hw = i9xx_power_well_sync_hw_noop,
2535 	.enable = vlv_dpio_cmn_power_well_enable,
2536 	.disable = vlv_dpio_cmn_power_well_disable,
2537 	.is_enabled = vlv_power_well_enabled,
2538 };
2539 
2540 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2541 	.sync_hw = i9xx_power_well_sync_hw_noop,
2542 	.enable = vlv_power_well_enable,
2543 	.disable = vlv_power_well_disable,
2544 	.is_enabled = vlv_power_well_enabled,
2545 };
2546 
2547 static const struct i915_power_well_desc vlv_power_wells[] = {
2548 	{
2549 		.name = "always-on",
2550 		.always_on = true,
2551 		.domains = POWER_DOMAIN_MASK,
2552 		.ops = &i9xx_always_on_power_well_ops,
2553 		.id = DISP_PW_ID_NONE,
2554 	},
2555 	{
2556 		.name = "display",
2557 		.domains = VLV_DISPLAY_POWER_DOMAINS,
2558 		.ops = &vlv_display_power_well_ops,
2559 		.id = VLV_DISP_PW_DISP2D,
2560 		{
2561 			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2562 		},
2563 	},
2564 	{
2565 		.name = "dpio-tx-b-01",
2566 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2567 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2568 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2569 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2570 		.ops = &vlv_dpio_power_well_ops,
2571 		.id = DISP_PW_ID_NONE,
2572 		{
2573 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2574 		},
2575 	},
2576 	{
2577 		.name = "dpio-tx-b-23",
2578 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2579 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2580 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2581 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2582 		.ops = &vlv_dpio_power_well_ops,
2583 		.id = DISP_PW_ID_NONE,
2584 		{
2585 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2586 		},
2587 	},
2588 	{
2589 		.name = "dpio-tx-c-01",
2590 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2591 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2592 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2593 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2594 		.ops = &vlv_dpio_power_well_ops,
2595 		.id = DISP_PW_ID_NONE,
2596 		{
2597 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
2598 		},
2599 	},
2600 	{
2601 		.name = "dpio-tx-c-23",
2602 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2603 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2604 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2605 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2606 		.ops = &vlv_dpio_power_well_ops,
2607 		.id = DISP_PW_ID_NONE,
2608 		{
2609 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
2610 		},
2611 	},
2612 	{
2613 		.name = "dpio-common",
2614 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2615 		.ops = &vlv_dpio_cmn_power_well_ops,
2616 		.id = VLV_DISP_PW_DPIO_CMN_BC,
2617 		{
2618 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2619 		},
2620 	},
2621 };
2622 
2623 static const struct i915_power_well_desc chv_power_wells[] = {
2624 	{
2625 		.name = "always-on",
2626 		.always_on = true,
2627 		.domains = POWER_DOMAIN_MASK,
2628 		.ops = &i9xx_always_on_power_well_ops,
2629 		.id = DISP_PW_ID_NONE,
2630 	},
2631 	{
2632 		.name = "display",
2633 		/*
2634 		 * Pipe A power well is the new disp2d well. Pipe B and C
2635 		 * power wells don't actually exist. Pipe A power well is
2636 		 * required for any pipe to work.
2637 		 */
2638 		.domains = CHV_DISPLAY_POWER_DOMAINS,
2639 		.ops = &chv_pipe_power_well_ops,
2640 		.id = DISP_PW_ID_NONE,
2641 	},
2642 	{
2643 		.name = "dpio-common-bc",
2644 		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2645 		.ops = &chv_dpio_cmn_power_well_ops,
2646 		.id = VLV_DISP_PW_DPIO_CMN_BC,
2647 		{
2648 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2649 		},
2650 	},
2651 	{
2652 		.name = "dpio-common-d",
2653 		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2654 		.ops = &chv_dpio_cmn_power_well_ops,
2655 		.id = CHV_DISP_PW_DPIO_CMN_D,
2656 		{
2657 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
2658 		},
2659 	},
2660 };
2661 
2662 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2663 					 enum i915_power_well_id power_well_id)
2664 {
2665 	struct i915_power_well *power_well;
2666 	bool ret;
2667 
2668 	power_well = lookup_power_well(dev_priv, power_well_id);
2669 	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
2670 
2671 	return ret;
2672 }
2673 
2674 static const struct i915_power_well_desc skl_power_wells[] = {
2675 	{
2676 		.name = "always-on",
2677 		.always_on = true,
2678 		.domains = POWER_DOMAIN_MASK,
2679 		.ops = &i9xx_always_on_power_well_ops,
2680 		.id = DISP_PW_ID_NONE,
2681 	},
2682 	{
2683 		.name = "power well 1",
2684 		/* Handled by the DMC firmware */
2685 		.always_on = true,
2686 		.domains = 0,
2687 		.ops = &hsw_power_well_ops,
2688 		.id = SKL_DISP_PW_1,
2689 		{
2690 			.hsw.regs = &hsw_power_well_regs,
2691 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
2692 			.hsw.has_fuses = true,
2693 		},
2694 	},
2695 	{
2696 		.name = "MISC IO power well",
2697 		/* Handled by the DMC firmware */
2698 		.always_on = true,
2699 		.domains = 0,
2700 		.ops = &hsw_power_well_ops,
2701 		.id = SKL_DISP_PW_MISC_IO,
2702 		{
2703 			.hsw.regs = &hsw_power_well_regs,
2704 			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
2705 		},
2706 	},
2707 	{
2708 		.name = "DC off",
2709 		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2710 		.ops = &gen9_dc_off_power_well_ops,
2711 		.id = DISP_PW_ID_NONE,
2712 	},
2713 	{
2714 		.name = "power well 2",
2715 		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2716 		.ops = &hsw_power_well_ops,
2717 		.id = SKL_DISP_PW_2,
2718 		{
2719 			.hsw.regs = &hsw_power_well_regs,
2720 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
2721 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2722 			.hsw.has_vga = true,
2723 			.hsw.has_fuses = true,
2724 		},
2725 	},
2726 	{
2727 		.name = "DDI A/E IO power well",
2728 		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
2729 		.ops = &hsw_power_well_ops,
2730 		.id = DISP_PW_ID_NONE,
2731 		{
2732 			.hsw.regs = &hsw_power_well_regs,
2733 			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
2734 		},
2735 	},
2736 	{
2737 		.name = "DDI B IO power well",
2738 		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2739 		.ops = &hsw_power_well_ops,
2740 		.id = DISP_PW_ID_NONE,
2741 		{
2742 			.hsw.regs = &hsw_power_well_regs,
2743 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2744 		},
2745 	},
2746 	{
2747 		.name = "DDI C IO power well",
2748 		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2749 		.ops = &hsw_power_well_ops,
2750 		.id = DISP_PW_ID_NONE,
2751 		{
2752 			.hsw.regs = &hsw_power_well_regs,
2753 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2754 		},
2755 	},
2756 	{
2757 		.name = "DDI D IO power well",
2758 		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
2759 		.ops = &hsw_power_well_ops,
2760 		.id = DISP_PW_ID_NONE,
2761 		{
2762 			.hsw.regs = &hsw_power_well_regs,
2763 			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
2764 		},
2765 	},
2766 };
2767 
2768 static const struct i915_power_well_desc bxt_power_wells[] = {
2769 	{
2770 		.name = "always-on",
2771 		.always_on = true,
2772 		.domains = POWER_DOMAIN_MASK,
2773 		.ops = &i9xx_always_on_power_well_ops,
2774 		.id = DISP_PW_ID_NONE,
2775 	},
2776 	{
2777 		.name = "power well 1",
2778 		/* Handled by the DMC firmware */
2779 		.always_on = true,
2780 		.domains = 0,
2781 		.ops = &hsw_power_well_ops,
2782 		.id = SKL_DISP_PW_1,
2783 		{
2784 			.hsw.regs = &hsw_power_well_regs,
2785 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
2786 			.hsw.has_fuses = true,
2787 		},
2788 	},
2789 	{
2790 		.name = "DC off",
2791 		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2792 		.ops = &gen9_dc_off_power_well_ops,
2793 		.id = DISP_PW_ID_NONE,
2794 	},
2795 	{
2796 		.name = "power well 2",
2797 		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2798 		.ops = &hsw_power_well_ops,
2799 		.id = SKL_DISP_PW_2,
2800 		{
2801 			.hsw.regs = &hsw_power_well_regs,
2802 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
2803 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2804 			.hsw.has_vga = true,
2805 			.hsw.has_fuses = true,
2806 		},
2807 	},
2808 	{
2809 		.name = "dpio-common-a",
2810 		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2811 		.ops = &bxt_dpio_cmn_power_well_ops,
2812 		.id = BXT_DISP_PW_DPIO_CMN_A,
2813 		{
2814 			.bxt.phy = DPIO_PHY1,
2815 		},
2816 	},
2817 	{
2818 		.name = "dpio-common-bc",
2819 		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2820 		.ops = &bxt_dpio_cmn_power_well_ops,
2821 		.id = VLV_DISP_PW_DPIO_CMN_BC,
2822 		{
2823 			.bxt.phy = DPIO_PHY0,
2824 		},
2825 	},
2826 };
2827 
2828 static const struct i915_power_well_desc glk_power_wells[] = {
2829 	{
2830 		.name = "always-on",
2831 		.always_on = true,
2832 		.domains = POWER_DOMAIN_MASK,
2833 		.ops = &i9xx_always_on_power_well_ops,
2834 		.id = DISP_PW_ID_NONE,
2835 	},
2836 	{
2837 		.name = "power well 1",
2838 		/* Handled by the DMC firmware */
2839 		.always_on = true,
2840 		.domains = 0,
2841 		.ops = &hsw_power_well_ops,
2842 		.id = SKL_DISP_PW_1,
2843 		{
2844 			.hsw.regs = &hsw_power_well_regs,
2845 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
2846 			.hsw.has_fuses = true,
2847 		},
2848 	},
2849 	{
2850 		.name = "DC off",
2851 		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2852 		.ops = &gen9_dc_off_power_well_ops,
2853 		.id = DISP_PW_ID_NONE,
2854 	},
2855 	{
2856 		.name = "power well 2",
2857 		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2858 		.ops = &hsw_power_well_ops,
2859 		.id = SKL_DISP_PW_2,
2860 		{
2861 			.hsw.regs = &hsw_power_well_regs,
2862 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
2863 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2864 			.hsw.has_vga = true,
2865 			.hsw.has_fuses = true,
2866 		},
2867 	},
2868 	{
2869 		.name = "dpio-common-a",
2870 		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2871 		.ops = &bxt_dpio_cmn_power_well_ops,
2872 		.id = BXT_DISP_PW_DPIO_CMN_A,
2873 		{
2874 			.bxt.phy = DPIO_PHY1,
2875 		},
2876 	},
2877 	{
2878 		.name = "dpio-common-b",
2879 		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2880 		.ops = &bxt_dpio_cmn_power_well_ops,
2881 		.id = VLV_DISP_PW_DPIO_CMN_BC,
2882 		{
2883 			.bxt.phy = DPIO_PHY0,
2884 		},
2885 	},
2886 	{
2887 		.name = "dpio-common-c",
2888 		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2889 		.ops = &bxt_dpio_cmn_power_well_ops,
2890 		.id = GLK_DISP_PW_DPIO_CMN_C,
2891 		{
2892 			.bxt.phy = DPIO_PHY2,
2893 		},
2894 	},
2895 	{
2896 		.name = "AUX A",
2897 		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
2898 		.ops = &hsw_power_well_ops,
2899 		.id = DISP_PW_ID_NONE,
2900 		{
2901 			.hsw.regs = &hsw_power_well_regs,
2902 			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2903 		},
2904 	},
2905 	{
2906 		.name = "AUX B",
2907 		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
2908 		.ops = &hsw_power_well_ops,
2909 		.id = DISP_PW_ID_NONE,
2910 		{
2911 			.hsw.regs = &hsw_power_well_regs,
2912 			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2913 		},
2914 	},
2915 	{
2916 		.name = "AUX C",
2917 		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
2918 		.ops = &hsw_power_well_ops,
2919 		.id = DISP_PW_ID_NONE,
2920 		{
2921 			.hsw.regs = &hsw_power_well_regs,
2922 			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
2923 		},
2924 	},
2925 	{
2926 		.name = "DDI A IO power well",
2927 		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
2928 		.ops = &hsw_power_well_ops,
2929 		.id = DISP_PW_ID_NONE,
2930 		{
2931 			.hsw.regs = &hsw_power_well_regs,
2932 			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
2933 		},
2934 	},
2935 	{
2936 		.name = "DDI B IO power well",
2937 		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2938 		.ops = &hsw_power_well_ops,
2939 		.id = DISP_PW_ID_NONE,
2940 		{
2941 			.hsw.regs = &hsw_power_well_regs,
2942 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2943 		},
2944 	},
2945 	{
2946 		.name = "DDI C IO power well",
2947 		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2948 		.ops = &hsw_power_well_ops,
2949 		.id = DISP_PW_ID_NONE,
2950 		{
2951 			.hsw.regs = &hsw_power_well_regs,
2952 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2953 		},
2954 	},
2955 };
2956 
2957 static const struct i915_power_well_desc cnl_power_wells[] = {
2958 	{
2959 		.name = "always-on",
2960 		.always_on = true,
2961 		.domains = POWER_DOMAIN_MASK,
2962 		.ops = &i9xx_always_on_power_well_ops,
2963 		.id = DISP_PW_ID_NONE,
2964 	},
2965 	{
2966 		.name = "power well 1",
2967 		/* Handled by the DMC firmware */
2968 		.always_on = true,
2969 		.domains = 0,
2970 		.ops = &hsw_power_well_ops,
2971 		.id = SKL_DISP_PW_1,
2972 		{
2973 			.hsw.regs = &hsw_power_well_regs,
2974 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
2975 			.hsw.has_fuses = true,
2976 		},
2977 	},
2978 	{
2979 		.name = "AUX A",
2980 		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
2981 		.ops = &hsw_power_well_ops,
2982 		.id = DISP_PW_ID_NONE,
2983 		{
2984 			.hsw.regs = &hsw_power_well_regs,
2985 			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2986 		},
2987 	},
2988 	{
2989 		.name = "AUX B",
2990 		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
2991 		.ops = &hsw_power_well_ops,
2992 		.id = DISP_PW_ID_NONE,
2993 		{
2994 			.hsw.regs = &hsw_power_well_regs,
2995 			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2996 		},
2997 	},
2998 	{
2999 		.name = "AUX C",
3000 		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3001 		.ops = &hsw_power_well_ops,
3002 		.id = DISP_PW_ID_NONE,
3003 		{
3004 			.hsw.regs = &hsw_power_well_regs,
3005 			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3006 		},
3007 	},
3008 	{
3009 		.name = "AUX D",
3010 		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3011 		.ops = &hsw_power_well_ops,
3012 		.id = DISP_PW_ID_NONE,
3013 		{
3014 			.hsw.regs = &hsw_power_well_regs,
3015 			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3016 		},
3017 	},
3018 	{
3019 		.name = "DC off",
3020 		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3021 		.ops = &gen9_dc_off_power_well_ops,
3022 		.id = DISP_PW_ID_NONE,
3023 	},
3024 	{
3025 		.name = "power well 2",
3026 		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3027 		.ops = &hsw_power_well_ops,
3028 		.id = SKL_DISP_PW_2,
3029 		{
3030 			.hsw.regs = &hsw_power_well_regs,
3031 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3032 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3033 			.hsw.has_vga = true,
3034 			.hsw.has_fuses = true,
3035 		},
3036 	},
3037 	{
3038 		.name = "DDI A IO power well",
3039 		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3040 		.ops = &hsw_power_well_ops,
3041 		.id = DISP_PW_ID_NONE,
3042 		{
3043 			.hsw.regs = &hsw_power_well_regs,
3044 			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3045 		},
3046 	},
3047 	{
3048 		.name = "DDI B IO power well",
3049 		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3050 		.ops = &hsw_power_well_ops,
3051 		.id = DISP_PW_ID_NONE,
3052 		{
3053 			.hsw.regs = &hsw_power_well_regs,
3054 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3055 		},
3056 	},
3057 	{
3058 		.name = "DDI C IO power well",
3059 		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3060 		.ops = &hsw_power_well_ops,
3061 		.id = DISP_PW_ID_NONE,
3062 		{
3063 			.hsw.regs = &hsw_power_well_regs,
3064 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3065 		},
3066 	},
3067 	{
3068 		.name = "DDI D IO power well",
3069 		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3070 		.ops = &hsw_power_well_ops,
3071 		.id = DISP_PW_ID_NONE,
3072 		{
3073 			.hsw.regs = &hsw_power_well_regs,
3074 			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3075 		},
3076 	},
3077 	{
3078 		.name = "DDI F IO power well",
3079 		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3080 		.ops = &hsw_power_well_ops,
3081 		.id = DISP_PW_ID_NONE,
3082 		{
3083 			.hsw.regs = &hsw_power_well_regs,
3084 			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3085 		},
3086 	},
3087 	{
3088 		.name = "AUX F",
3089 		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3090 		.ops = &hsw_power_well_ops,
3091 		.id = DISP_PW_ID_NONE,
3092 		{
3093 			.hsw.regs = &hsw_power_well_regs,
3094 			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3095 		},
3096 	},
3097 };
3098 
3099 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3100 	.sync_hw = hsw_power_well_sync_hw,
3101 	.enable = icl_combo_phy_aux_power_well_enable,
3102 	.disable = icl_combo_phy_aux_power_well_disable,
3103 	.is_enabled = hsw_power_well_enabled,
3104 };
3105 
3106 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3107 	.sync_hw = hsw_power_well_sync_hw,
3108 	.enable = icl_tc_phy_aux_power_well_enable,
3109 	.disable = hsw_power_well_disable,
3110 	.is_enabled = hsw_power_well_enabled,
3111 };
3112 
3113 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3114 	.bios	= ICL_PWR_WELL_CTL_AUX1,
3115 	.driver	= ICL_PWR_WELL_CTL_AUX2,
3116 	.debug	= ICL_PWR_WELL_CTL_AUX4,
3117 };
3118 
3119 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3120 	.bios	= ICL_PWR_WELL_CTL_DDI1,
3121 	.driver	= ICL_PWR_WELL_CTL_DDI2,
3122 	.debug	= ICL_PWR_WELL_CTL_DDI4,
3123 };
3124 
3125 static const struct i915_power_well_desc icl_power_wells[] = {
3126 	{
3127 		.name = "always-on",
3128 		.always_on = true,
3129 		.domains = POWER_DOMAIN_MASK,
3130 		.ops = &i9xx_always_on_power_well_ops,
3131 		.id = DISP_PW_ID_NONE,
3132 	},
3133 	{
3134 		.name = "power well 1",
3135 		/* Handled by the DMC firmware */
3136 		.always_on = true,
3137 		.domains = 0,
3138 		.ops = &hsw_power_well_ops,
3139 		.id = SKL_DISP_PW_1,
3140 		{
3141 			.hsw.regs = &hsw_power_well_regs,
3142 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3143 			.hsw.has_fuses = true,
3144 		},
3145 	},
3146 	{
3147 		.name = "DC off",
3148 		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3149 		.ops = &gen9_dc_off_power_well_ops,
3150 		.id = DISP_PW_ID_NONE,
3151 	},
3152 	{
3153 		.name = "power well 2",
3154 		.domains = ICL_PW_2_POWER_DOMAINS,
3155 		.ops = &hsw_power_well_ops,
3156 		.id = SKL_DISP_PW_2,
3157 		{
3158 			.hsw.regs = &hsw_power_well_regs,
3159 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3160 			.hsw.has_fuses = true,
3161 		},
3162 	},
3163 	{
3164 		.name = "power well 3",
3165 		.domains = ICL_PW_3_POWER_DOMAINS,
3166 		.ops = &hsw_power_well_ops,
3167 		.id = DISP_PW_ID_NONE,
3168 		{
3169 			.hsw.regs = &hsw_power_well_regs,
3170 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3171 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3172 			.hsw.has_vga = true,
3173 			.hsw.has_fuses = true,
3174 		},
3175 	},
3176 	{
3177 		.name = "DDI A IO",
3178 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3179 		.ops = &hsw_power_well_ops,
3180 		.id = DISP_PW_ID_NONE,
3181 		{
3182 			.hsw.regs = &icl_ddi_power_well_regs,
3183 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3184 		},
3185 	},
3186 	{
3187 		.name = "DDI B IO",
3188 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3189 		.ops = &hsw_power_well_ops,
3190 		.id = DISP_PW_ID_NONE,
3191 		{
3192 			.hsw.regs = &icl_ddi_power_well_regs,
3193 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3194 		},
3195 	},
3196 	{
3197 		.name = "DDI C IO",
3198 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3199 		.ops = &hsw_power_well_ops,
3200 		.id = DISP_PW_ID_NONE,
3201 		{
3202 			.hsw.regs = &icl_ddi_power_well_regs,
3203 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3204 		},
3205 	},
3206 	{
3207 		.name = "DDI D IO",
3208 		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3209 		.ops = &hsw_power_well_ops,
3210 		.id = DISP_PW_ID_NONE,
3211 		{
3212 			.hsw.regs = &icl_ddi_power_well_regs,
3213 			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3214 		},
3215 	},
3216 	{
3217 		.name = "DDI E IO",
3218 		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
3219 		.ops = &hsw_power_well_ops,
3220 		.id = DISP_PW_ID_NONE,
3221 		{
3222 			.hsw.regs = &icl_ddi_power_well_regs,
3223 			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3224 		},
3225 	},
3226 	{
3227 		.name = "DDI F IO",
3228 		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
3229 		.ops = &hsw_power_well_ops,
3230 		.id = DISP_PW_ID_NONE,
3231 		{
3232 			.hsw.regs = &icl_ddi_power_well_regs,
3233 			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3234 		},
3235 	},
3236 	{
3237 		.name = "AUX A",
3238 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3239 		.ops = &icl_combo_phy_aux_power_well_ops,
3240 		.id = DISP_PW_ID_NONE,
3241 		{
3242 			.hsw.regs = &icl_aux_power_well_regs,
3243 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3244 		},
3245 	},
3246 	{
3247 		.name = "AUX B",
3248 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3249 		.ops = &icl_combo_phy_aux_power_well_ops,
3250 		.id = DISP_PW_ID_NONE,
3251 		{
3252 			.hsw.regs = &icl_aux_power_well_regs,
3253 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3254 		},
3255 	},
3256 	{
3257 		.name = "AUX C",
3258 		.domains = ICL_AUX_C_IO_POWER_DOMAINS,
3259 		.ops = &icl_tc_phy_aux_power_well_ops,
3260 		.id = DISP_PW_ID_NONE,
3261 		{
3262 			.hsw.regs = &icl_aux_power_well_regs,
3263 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3264 			.hsw.is_tc_tbt = false,
3265 		},
3266 	},
3267 	{
3268 		.name = "AUX D",
3269 		.domains = ICL_AUX_D_IO_POWER_DOMAINS,
3270 		.ops = &icl_tc_phy_aux_power_well_ops,
3271 		.id = DISP_PW_ID_NONE,
3272 		{
3273 			.hsw.regs = &icl_aux_power_well_regs,
3274 			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3275 			.hsw.is_tc_tbt = false,
3276 		},
3277 	},
3278 	{
3279 		.name = "AUX E",
3280 		.domains = ICL_AUX_E_IO_POWER_DOMAINS,
3281 		.ops = &icl_tc_phy_aux_power_well_ops,
3282 		.id = DISP_PW_ID_NONE,
3283 		{
3284 			.hsw.regs = &icl_aux_power_well_regs,
3285 			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3286 			.hsw.is_tc_tbt = false,
3287 		},
3288 	},
3289 	{
3290 		.name = "AUX F",
3291 		.domains = ICL_AUX_F_IO_POWER_DOMAINS,
3292 		.ops = &icl_tc_phy_aux_power_well_ops,
3293 		.id = DISP_PW_ID_NONE,
3294 		{
3295 			.hsw.regs = &icl_aux_power_well_regs,
3296 			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3297 			.hsw.is_tc_tbt = false,
3298 		},
3299 	},
3300 	{
3301 		.name = "AUX TBT1",
3302 		.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
3303 		.ops = &icl_tc_phy_aux_power_well_ops,
3304 		.id = DISP_PW_ID_NONE,
3305 		{
3306 			.hsw.regs = &icl_aux_power_well_regs,
3307 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3308 			.hsw.is_tc_tbt = true,
3309 		},
3310 	},
3311 	{
3312 		.name = "AUX TBT2",
3313 		.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
3314 		.ops = &icl_tc_phy_aux_power_well_ops,
3315 		.id = DISP_PW_ID_NONE,
3316 		{
3317 			.hsw.regs = &icl_aux_power_well_regs,
3318 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3319 			.hsw.is_tc_tbt = true,
3320 		},
3321 	},
3322 	{
3323 		.name = "AUX TBT3",
3324 		.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
3325 		.ops = &icl_tc_phy_aux_power_well_ops,
3326 		.id = DISP_PW_ID_NONE,
3327 		{
3328 			.hsw.regs = &icl_aux_power_well_regs,
3329 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3330 			.hsw.is_tc_tbt = true,
3331 		},
3332 	},
3333 	{
3334 		.name = "AUX TBT4",
3335 		.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
3336 		.ops = &icl_tc_phy_aux_power_well_ops,
3337 		.id = DISP_PW_ID_NONE,
3338 		{
3339 			.hsw.regs = &icl_aux_power_well_regs,
3340 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3341 			.hsw.is_tc_tbt = true,
3342 		},
3343 	},
3344 	{
3345 		.name = "power well 4",
3346 		.domains = ICL_PW_4_POWER_DOMAINS,
3347 		.ops = &hsw_power_well_ops,
3348 		.id = DISP_PW_ID_NONE,
3349 		{
3350 			.hsw.regs = &hsw_power_well_regs,
3351 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3352 			.hsw.has_fuses = true,
3353 			.hsw.irq_pipe_mask = BIT(PIPE_C),
3354 		},
3355 	},
3356 };
3357 
3358 static int
3359 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
3360 				   int disable_power_well)
3361 {
3362 	if (disable_power_well >= 0)
3363 		return !!disable_power_well;
3364 
3365 	return 1;
3366 }
3367 
3368 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3369 			       int enable_dc)
3370 {
3371 	u32 mask;
3372 	int requested_dc;
3373 	int max_dc;
3374 
3375 	if (INTEL_GEN(dev_priv) >= 11) {
3376 		max_dc = 2;
3377 		/*
3378 		 * DC9 has a separate HW flow from the rest of the DC states,
3379 		 * not depending on the DMC firmware. It's needed by system
3380 		 * suspend/resume, so allow it unconditionally.
3381 		 */
3382 		mask = DC_STATE_EN_DC9;
3383 	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
3384 		max_dc = 2;
3385 		mask = 0;
3386 	} else if (IS_GEN9_LP(dev_priv)) {
3387 		max_dc = 1;
3388 		mask = DC_STATE_EN_DC9;
3389 	} else {
3390 		max_dc = 0;
3391 		mask = 0;
3392 	}
3393 
3394 	if (!i915_modparams.disable_power_well)
3395 		max_dc = 0;
3396 
3397 	if (enable_dc >= 0 && enable_dc <= max_dc) {
3398 		requested_dc = enable_dc;
3399 	} else if (enable_dc == -1) {
3400 		requested_dc = max_dc;
3401 	} else if (enable_dc > max_dc && enable_dc <= 2) {
3402 		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3403 			      enable_dc, max_dc);
3404 		requested_dc = max_dc;
3405 	} else {
3406 		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3407 		requested_dc = max_dc;
3408 	}
3409 
3410 	if (requested_dc > 1)
3411 		mask |= DC_STATE_EN_UPTO_DC6;
3412 	if (requested_dc > 0)
3413 		mask |= DC_STATE_EN_UPTO_DC5;
3414 
3415 	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
3416 
3417 	return mask;
3418 }
3419 
3420 static int
3421 __set_power_wells(struct i915_power_domains *power_domains,
3422 		  const struct i915_power_well_desc *power_well_descs,
3423 		  int power_well_count)
3424 {
3425 	u64 power_well_ids = 0;
3426 	int i;
3427 
3428 	power_domains->power_well_count = power_well_count;
3429 	power_domains->power_wells =
3430 				kcalloc(power_well_count,
3431 					sizeof(*power_domains->power_wells),
3432 					GFP_KERNEL);
3433 	if (!power_domains->power_wells)
3434 		return -ENOMEM;
3435 
3436 	for (i = 0; i < power_well_count; i++) {
3437 		enum i915_power_well_id id = power_well_descs[i].id;
3438 
3439 		power_domains->power_wells[i].desc = &power_well_descs[i];
3440 
3441 		if (id == DISP_PW_ID_NONE)
3442 			continue;
3443 
3444 		WARN_ON(id >= sizeof(power_well_ids) * 8);
3445 		WARN_ON(power_well_ids & BIT_ULL(id));
3446 		power_well_ids |= BIT_ULL(id);
3447 	}
3448 
3449 	return 0;
3450 }
3451 
3452 #define set_power_wells(power_domains, __power_well_descs) \
3453 	__set_power_wells(power_domains, __power_well_descs, \
3454 			  ARRAY_SIZE(__power_well_descs))
3455 
3456 /**
3457  * intel_power_domains_init - initializes the power domain structures
3458  * @dev_priv: i915 device instance
3459  *
3460  * Initializes the power domain structures for @dev_priv depending upon the
3461  * supported platform.
3462  */
3463 int intel_power_domains_init(struct drm_i915_private *dev_priv)
3464 {
3465 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3466 	int err;
3467 
3468 	i915_modparams.disable_power_well =
3469 		sanitize_disable_power_well_option(dev_priv,
3470 						   i915_modparams.disable_power_well);
3471 	dev_priv->csr.allowed_dc_mask =
3472 		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
3473 
3474 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
3475 
3476 	mutex_init(&power_domains->lock);
3477 
3478 	INIT_DELAYED_WORK(&power_domains->async_put_work,
3479 			  intel_display_power_put_async_work);
3480 
3481 	/*
3482 	 * The enabling order will be from lower to higher indexed wells,
3483 	 * the disabling order is reversed.
3484 	 */
3485 	if (IS_GEN(dev_priv, 11)) {
3486 		err = set_power_wells(power_domains, icl_power_wells);
3487 	} else if (IS_CANNONLAKE(dev_priv)) {
3488 		err = set_power_wells(power_domains, cnl_power_wells);
3489 
3490 		/*
3491 		 * DDI and Aux IO are getting enabled for all ports
3492 		 * regardless the presence or use. So, in order to avoid
3493 		 * timeouts, lets remove them from the list
3494 		 * for the SKUs without port F.
3495 		 */
3496 		if (!IS_CNL_WITH_PORT_F(dev_priv))
3497 			power_domains->power_well_count -= 2;
3498 	} else if (IS_GEMINILAKE(dev_priv)) {
3499 		err = set_power_wells(power_domains, glk_power_wells);
3500 	} else if (IS_BROXTON(dev_priv)) {
3501 		err = set_power_wells(power_domains, bxt_power_wells);
3502 	} else if (IS_GEN9_BC(dev_priv)) {
3503 		err = set_power_wells(power_domains, skl_power_wells);
3504 	} else if (IS_CHERRYVIEW(dev_priv)) {
3505 		err = set_power_wells(power_domains, chv_power_wells);
3506 	} else if (IS_BROADWELL(dev_priv)) {
3507 		err = set_power_wells(power_domains, bdw_power_wells);
3508 	} else if (IS_HASWELL(dev_priv)) {
3509 		err = set_power_wells(power_domains, hsw_power_wells);
3510 	} else if (IS_VALLEYVIEW(dev_priv)) {
3511 		err = set_power_wells(power_domains, vlv_power_wells);
3512 	} else if (IS_I830(dev_priv)) {
3513 		err = set_power_wells(power_domains, i830_power_wells);
3514 	} else {
3515 		err = set_power_wells(power_domains, i9xx_always_on_power_well);
3516 	}
3517 
3518 	return err;
3519 }
3520 
3521 /**
3522  * intel_power_domains_cleanup - clean up power domains resources
3523  * @dev_priv: i915 device instance
3524  *
3525  * Release any resources acquired by intel_power_domains_init()
3526  */
3527 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
3528 {
3529 	kfree(dev_priv->power_domains.power_wells);
3530 }
3531 
3532 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
3533 {
3534 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3535 	struct i915_power_well *power_well;
3536 
3537 	mutex_lock(&power_domains->lock);
3538 	for_each_power_well(dev_priv, power_well) {
3539 		power_well->desc->ops->sync_hw(dev_priv, power_well);
3540 		power_well->hw_enabled =
3541 			power_well->desc->ops->is_enabled(dev_priv, power_well);
3542 	}
3543 	mutex_unlock(&power_domains->lock);
3544 }
3545 
3546 static inline
3547 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
3548 			  i915_reg_t reg, bool enable)
3549 {
3550 	u32 val, status;
3551 
3552 	val = I915_READ(reg);
3553 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
3554 	I915_WRITE(reg, val);
3555 	POSTING_READ(reg);
3556 	udelay(10);
3557 
3558 	status = I915_READ(reg) & DBUF_POWER_STATE;
3559 	if ((enable && !status) || (!enable && status)) {
3560 		DRM_ERROR("DBus power %s timeout!\n",
3561 			  enable ? "enable" : "disable");
3562 		return false;
3563 	}
3564 	return true;
3565 }
3566 
3567 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
3568 {
3569 	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
3570 }
3571 
3572 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
3573 {
3574 	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
3575 }
3576 
3577 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3578 {
3579 	if (INTEL_GEN(dev_priv) < 11)
3580 		return 1;
3581 	return 2;
3582 }
3583 
3584 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3585 			    u8 req_slices)
3586 {
3587 	const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
3588 	bool ret;
3589 
3590 	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
3591 		DRM_ERROR("Invalid number of dbuf slices requested\n");
3592 		return;
3593 	}
3594 
3595 	if (req_slices == hw_enabled_slices || req_slices == 0)
3596 		return;
3597 
3598 	if (req_slices > hw_enabled_slices)
3599 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3600 	else
3601 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3602 
3603 	if (ret)
3604 		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
3605 }
3606 
3607 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
3608 {
3609 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
3610 	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
3611 	POSTING_READ(DBUF_CTL_S2);
3612 
3613 	udelay(10);
3614 
3615 	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3616 	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3617 		DRM_ERROR("DBuf power enable timeout\n");
3618 	else
3619 		/*
3620 		 * FIXME: for now pretend that we only have 1 slice, see
3621 		 * intel_enabled_dbuf_slices_num().
3622 		 */
3623 		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
3624 }
3625 
3626 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
3627 {
3628 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
3629 	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
3630 	POSTING_READ(DBUF_CTL_S2);
3631 
3632 	udelay(10);
3633 
3634 	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3635 	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3636 		DRM_ERROR("DBuf power disable timeout!\n");
3637 	else
3638 		/*
3639 		 * FIXME: for now pretend that the first slice is always
3640 		 * enabled, see intel_enabled_dbuf_slices_num().
3641 		 */
3642 		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
3643 }
3644 
3645 static void icl_mbus_init(struct drm_i915_private *dev_priv)
3646 {
3647 	u32 val;
3648 
3649 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
3650 	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
3651 	      MBUS_ABOX_B_CREDIT(1) |
3652 	      MBUS_ABOX_BW_CREDIT(1);
3653 
3654 	I915_WRITE(MBUS_ABOX_CTL, val);
3655 }
3656 
3657 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
3658 {
3659 	u32 val = I915_READ(LCPLL_CTL);
3660 
3661 	/*
3662 	 * The LCPLL register should be turned on by the BIOS. For now
3663 	 * let's just check its state and print errors in case
3664 	 * something is wrong.  Don't even try to turn it on.
3665 	 */
3666 
3667 	if (val & LCPLL_CD_SOURCE_FCLK)
3668 		DRM_ERROR("CDCLK source is not LCPLL\n");
3669 
3670 	if (val & LCPLL_PLL_DISABLE)
3671 		DRM_ERROR("LCPLL is disabled\n");
3672 
3673 	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
3674 		DRM_ERROR("LCPLL not using non-SSC reference\n");
3675 }
3676 
3677 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
3678 {
3679 	struct drm_device *dev = &dev_priv->drm;
3680 	struct intel_crtc *crtc;
3681 
3682 	for_each_intel_crtc(dev, crtc)
3683 		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
3684 				pipe_name(crtc->pipe));
3685 
3686 	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
3687 			"Display power well on\n");
3688 	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
3689 			"SPLL enabled\n");
3690 	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
3691 			"WRPLL1 enabled\n");
3692 	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
3693 			"WRPLL2 enabled\n");
3694 	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
3695 			"Panel power on\n");
3696 	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
3697 			"CPU PWM1 enabled\n");
3698 	if (IS_HASWELL(dev_priv))
3699 		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
3700 				"CPU PWM2 enabled\n");
3701 	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
3702 			"PCH PWM1 enabled\n");
3703 	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
3704 			"Utility pin enabled\n");
3705 	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
3706 			"PCH GTC enabled\n");
3707 
3708 	/*
3709 	 * In theory we can still leave IRQs enabled, as long as only the HPD
3710 	 * interrupts remain enabled. We used to check for that, but since it's
3711 	 * gen-specific and since we only disable LCPLL after we fully disable
3712 	 * the interrupts, the check below should be enough.
3713 	 */
3714 	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
3715 }
3716 
3717 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
3718 {
3719 	if (IS_HASWELL(dev_priv))
3720 		return I915_READ(D_COMP_HSW);
3721 	else
3722 		return I915_READ(D_COMP_BDW);
3723 }
3724 
3725 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
3726 {
3727 	if (IS_HASWELL(dev_priv)) {
3728 		if (sandybridge_pcode_write(dev_priv,
3729 					    GEN6_PCODE_WRITE_D_COMP, val))
3730 			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
3731 	} else {
3732 		I915_WRITE(D_COMP_BDW, val);
3733 		POSTING_READ(D_COMP_BDW);
3734 	}
3735 }
3736 
3737 /*
3738  * This function implements pieces of two sequences from BSpec:
3739  * - Sequence for display software to disable LCPLL
3740  * - Sequence for display software to allow package C8+
3741  * The steps implemented here are just the steps that actually touch the LCPLL
3742  * register. Callers should take care of disabling all the display engine
3743  * functions, doing the mode unset, fixing interrupts, etc.
3744  */
3745 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
3746 			      bool switch_to_fclk, bool allow_power_down)
3747 {
3748 	u32 val;
3749 
3750 	assert_can_disable_lcpll(dev_priv);
3751 
3752 	val = I915_READ(LCPLL_CTL);
3753 
3754 	if (switch_to_fclk) {
3755 		val |= LCPLL_CD_SOURCE_FCLK;
3756 		I915_WRITE(LCPLL_CTL, val);
3757 
3758 		if (wait_for_us(I915_READ(LCPLL_CTL) &
3759 				LCPLL_CD_SOURCE_FCLK_DONE, 1))
3760 			DRM_ERROR("Switching to FCLK failed\n");
3761 
3762 		val = I915_READ(LCPLL_CTL);
3763 	}
3764 
3765 	val |= LCPLL_PLL_DISABLE;
3766 	I915_WRITE(LCPLL_CTL, val);
3767 	POSTING_READ(LCPLL_CTL);
3768 
3769 	if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
3770 				    LCPLL_PLL_LOCK, 0, 1))
3771 		DRM_ERROR("LCPLL still locked\n");
3772 
3773 	val = hsw_read_dcomp(dev_priv);
3774 	val |= D_COMP_COMP_DISABLE;
3775 	hsw_write_dcomp(dev_priv, val);
3776 	ndelay(100);
3777 
3778 	if (wait_for((hsw_read_dcomp(dev_priv) &
3779 		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
3780 		DRM_ERROR("D_COMP RCOMP still in progress\n");
3781 
3782 	if (allow_power_down) {
3783 		val = I915_READ(LCPLL_CTL);
3784 		val |= LCPLL_POWER_DOWN_ALLOW;
3785 		I915_WRITE(LCPLL_CTL, val);
3786 		POSTING_READ(LCPLL_CTL);
3787 	}
3788 }
3789 
3790 /*
3791  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
3792  * source.
3793  */
3794 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
3795 {
3796 	u32 val;
3797 
3798 	val = I915_READ(LCPLL_CTL);
3799 
3800 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
3801 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
3802 		return;
3803 
3804 	/*
3805 	 * Make sure we're not on PC8 state before disabling PC8, otherwise
3806 	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
3807 	 */
3808 	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
3809 
3810 	if (val & LCPLL_POWER_DOWN_ALLOW) {
3811 		val &= ~LCPLL_POWER_DOWN_ALLOW;
3812 		I915_WRITE(LCPLL_CTL, val);
3813 		POSTING_READ(LCPLL_CTL);
3814 	}
3815 
3816 	val = hsw_read_dcomp(dev_priv);
3817 	val |= D_COMP_COMP_FORCE;
3818 	val &= ~D_COMP_COMP_DISABLE;
3819 	hsw_write_dcomp(dev_priv, val);
3820 
3821 	val = I915_READ(LCPLL_CTL);
3822 	val &= ~LCPLL_PLL_DISABLE;
3823 	I915_WRITE(LCPLL_CTL, val);
3824 
3825 	if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
3826 				    LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
3827 		DRM_ERROR("LCPLL not locked yet\n");
3828 
3829 	if (val & LCPLL_CD_SOURCE_FCLK) {
3830 		val = I915_READ(LCPLL_CTL);
3831 		val &= ~LCPLL_CD_SOURCE_FCLK;
3832 		I915_WRITE(LCPLL_CTL, val);
3833 
3834 		if (wait_for_us((I915_READ(LCPLL_CTL) &
3835 				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
3836 			DRM_ERROR("Switching back to LCPLL failed\n");
3837 	}
3838 
3839 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
3840 
3841 	intel_update_cdclk(dev_priv);
3842 	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
3843 }
3844 
3845 /*
3846  * Package states C8 and deeper are really deep PC states that can only be
3847  * reached when all the devices on the system allow it, so even if the graphics
3848  * device allows PC8+, it doesn't mean the system will actually get to these
3849  * states. Our driver only allows PC8+ when going into runtime PM.
3850  *
3851  * The requirements for PC8+ are that all the outputs are disabled, the power
3852  * well is disabled and most interrupts are disabled, and these are also
3853  * requirements for runtime PM. When these conditions are met, we manually do
3854  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
3855  * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
3856  * hang the machine.
3857  *
3858  * When we really reach PC8 or deeper states (not just when we allow it) we lose
3859  * the state of some registers, so when we come back from PC8+ we need to
3860  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
3861  * need to take care of the registers kept by RC6. Notice that this happens even
3862  * if we don't put the device in PCI D3 state (which is what currently happens
3863  * because of the runtime PM support).
3864  *
3865  * For more, read "Display Sequences for Package C8" on the hardware
3866  * documentation.
3867  */
3868 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
3869 {
3870 	u32 val;
3871 
3872 	DRM_DEBUG_KMS("Enabling package C8+\n");
3873 
3874 	if (HAS_PCH_LPT_LP(dev_priv)) {
3875 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
3876 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
3877 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
3878 	}
3879 
3880 	lpt_disable_clkout_dp(dev_priv);
3881 	hsw_disable_lcpll(dev_priv, true, true);
3882 }
3883 
3884 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
3885 {
3886 	u32 val;
3887 
3888 	DRM_DEBUG_KMS("Disabling package C8+\n");
3889 
3890 	hsw_restore_lcpll(dev_priv);
3891 	intel_init_pch_refclk(dev_priv);
3892 
3893 	if (HAS_PCH_LPT_LP(dev_priv)) {
3894 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
3895 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
3896 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
3897 	}
3898 }
3899 
3900 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
3901 				      bool enable)
3902 {
3903 	i915_reg_t reg;
3904 	u32 reset_bits, val;
3905 
3906 	if (IS_IVYBRIDGE(dev_priv)) {
3907 		reg = GEN7_MSG_CTL;
3908 		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
3909 	} else {
3910 		reg = HSW_NDE_RSTWRN_OPT;
3911 		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
3912 	}
3913 
3914 	val = I915_READ(reg);
3915 
3916 	if (enable)
3917 		val |= reset_bits;
3918 	else
3919 		val &= ~reset_bits;
3920 
3921 	I915_WRITE(reg, val);
3922 }
3923 
3924 static void skl_display_core_init(struct drm_i915_private *dev_priv,
3925 				  bool resume)
3926 {
3927 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3928 	struct i915_power_well *well;
3929 
3930 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3931 
3932 	/* enable PCH reset handshake */
3933 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3934 
3935 	/* enable PG1 and Misc I/O */
3936 	mutex_lock(&power_domains->lock);
3937 
3938 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3939 	intel_power_well_enable(dev_priv, well);
3940 
3941 	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
3942 	intel_power_well_enable(dev_priv, well);
3943 
3944 	mutex_unlock(&power_domains->lock);
3945 
3946 	intel_cdclk_init(dev_priv);
3947 
3948 	gen9_dbuf_enable(dev_priv);
3949 
3950 	if (resume && dev_priv->csr.dmc_payload)
3951 		intel_csr_load_program(dev_priv);
3952 }
3953 
3954 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
3955 {
3956 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3957 	struct i915_power_well *well;
3958 
3959 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3960 
3961 	gen9_dbuf_disable(dev_priv);
3962 
3963 	intel_cdclk_uninit(dev_priv);
3964 
3965 	/* The spec doesn't call for removing the reset handshake flag */
3966 	/* disable PG1 and Misc I/O */
3967 
3968 	mutex_lock(&power_domains->lock);
3969 
3970 	/*
3971 	 * BSpec says to keep the MISC IO power well enabled here, only
3972 	 * remove our request for power well 1.
3973 	 * Note that even though the driver's request is removed power well 1
3974 	 * may stay enabled after this due to DMC's own request on it.
3975 	 */
3976 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3977 	intel_power_well_disable(dev_priv, well);
3978 
3979 	mutex_unlock(&power_domains->lock);
3980 
3981 	usleep_range(10, 30);		/* 10 us delay per Bspec */
3982 }
3983 
3984 void bxt_display_core_init(struct drm_i915_private *dev_priv,
3985 			   bool resume)
3986 {
3987 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3988 	struct i915_power_well *well;
3989 
3990 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3991 
3992 	/*
3993 	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
3994 	 * or else the reset will hang because there is no PCH to respond.
3995 	 * Move the handshake programming to initialization sequence.
3996 	 * Previously was left up to BIOS.
3997 	 */
3998 	intel_pch_reset_handshake(dev_priv, false);
3999 
4000 	/* Enable PG1 */
4001 	mutex_lock(&power_domains->lock);
4002 
4003 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4004 	intel_power_well_enable(dev_priv, well);
4005 
4006 	mutex_unlock(&power_domains->lock);
4007 
4008 	intel_cdclk_init(dev_priv);
4009 
4010 	gen9_dbuf_enable(dev_priv);
4011 
4012 	if (resume && dev_priv->csr.dmc_payload)
4013 		intel_csr_load_program(dev_priv);
4014 }
4015 
4016 void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4017 {
4018 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4019 	struct i915_power_well *well;
4020 
4021 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4022 
4023 	gen9_dbuf_disable(dev_priv);
4024 
4025 	intel_cdclk_uninit(dev_priv);
4026 
4027 	/* The spec doesn't call for removing the reset handshake flag */
4028 
4029 	/*
4030 	 * Disable PW1 (PG1).
4031 	 * Note that even though the driver's request is removed power well 1
4032 	 * may stay enabled after this due to DMC's own request on it.
4033 	 */
4034 	mutex_lock(&power_domains->lock);
4035 
4036 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4037 	intel_power_well_disable(dev_priv, well);
4038 
4039 	mutex_unlock(&power_domains->lock);
4040 
4041 	usleep_range(10, 30);		/* 10 us delay per Bspec */
4042 }
4043 
4044 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4045 {
4046 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4047 	struct i915_power_well *well;
4048 
4049 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4050 
4051 	/* 1. Enable PCH Reset Handshake */
4052 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4053 
4054 	/* 2-3. */
4055 	intel_combo_phy_init(dev_priv);
4056 
4057 	/*
4058 	 * 4. Enable Power Well 1 (PG1).
4059 	 *    The AUX IO power wells will be enabled on demand.
4060 	 */
4061 	mutex_lock(&power_domains->lock);
4062 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4063 	intel_power_well_enable(dev_priv, well);
4064 	mutex_unlock(&power_domains->lock);
4065 
4066 	/* 5. Enable CD clock */
4067 	intel_cdclk_init(dev_priv);
4068 
4069 	/* 6. Enable DBUF */
4070 	gen9_dbuf_enable(dev_priv);
4071 
4072 	if (resume && dev_priv->csr.dmc_payload)
4073 		intel_csr_load_program(dev_priv);
4074 }
4075 
4076 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
4077 {
4078 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4079 	struct i915_power_well *well;
4080 
4081 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4082 
4083 	/* 1. Disable all display engine functions -> aready done */
4084 
4085 	/* 2. Disable DBUF */
4086 	gen9_dbuf_disable(dev_priv);
4087 
4088 	/* 3. Disable CD clock */
4089 	intel_cdclk_uninit(dev_priv);
4090 
4091 	/*
4092 	 * 4. Disable Power Well 1 (PG1).
4093 	 *    The AUX IO power wells are toggled on demand, so they are already
4094 	 *    disabled at this point.
4095 	 */
4096 	mutex_lock(&power_domains->lock);
4097 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4098 	intel_power_well_disable(dev_priv, well);
4099 	mutex_unlock(&power_domains->lock);
4100 
4101 	usleep_range(10, 30);		/* 10 us delay per Bspec */
4102 
4103 	/* 5. */
4104 	intel_combo_phy_uninit(dev_priv);
4105 }
4106 
4107 void icl_display_core_init(struct drm_i915_private *dev_priv,
4108 			   bool resume)
4109 {
4110 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4111 	struct i915_power_well *well;
4112 
4113 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4114 
4115 	/* 1. Enable PCH reset handshake. */
4116 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4117 
4118 	/* 2. Initialize all combo phys */
4119 	intel_combo_phy_init(dev_priv);
4120 
4121 	/*
4122 	 * 3. Enable Power Well 1 (PG1).
4123 	 *    The AUX IO power wells will be enabled on demand.
4124 	 */
4125 	mutex_lock(&power_domains->lock);
4126 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4127 	intel_power_well_enable(dev_priv, well);
4128 	mutex_unlock(&power_domains->lock);
4129 
4130 	/* 4. Enable CDCLK. */
4131 	intel_cdclk_init(dev_priv);
4132 
4133 	/* 5. Enable DBUF. */
4134 	icl_dbuf_enable(dev_priv);
4135 
4136 	/* 6. Setup MBUS. */
4137 	icl_mbus_init(dev_priv);
4138 
4139 	if (resume && dev_priv->csr.dmc_payload)
4140 		intel_csr_load_program(dev_priv);
4141 }
4142 
4143 void icl_display_core_uninit(struct drm_i915_private *dev_priv)
4144 {
4145 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4146 	struct i915_power_well *well;
4147 
4148 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4149 
4150 	/* 1. Disable all display engine functions -> aready done */
4151 
4152 	/* 2. Disable DBUF */
4153 	icl_dbuf_disable(dev_priv);
4154 
4155 	/* 3. Disable CD clock */
4156 	intel_cdclk_uninit(dev_priv);
4157 
4158 	/*
4159 	 * 4. Disable Power Well 1 (PG1).
4160 	 *    The AUX IO power wells are toggled on demand, so they are already
4161 	 *    disabled at this point.
4162 	 */
4163 	mutex_lock(&power_domains->lock);
4164 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4165 	intel_power_well_disable(dev_priv, well);
4166 	mutex_unlock(&power_domains->lock);
4167 
4168 	/* 5. */
4169 	intel_combo_phy_uninit(dev_priv);
4170 }
4171 
4172 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
4173 {
4174 	struct i915_power_well *cmn_bc =
4175 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
4176 	struct i915_power_well *cmn_d =
4177 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
4178 
4179 	/*
4180 	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
4181 	 * workaround never ever read DISPLAY_PHY_CONTROL, and
4182 	 * instead maintain a shadow copy ourselves. Use the actual
4183 	 * power well state and lane status to reconstruct the
4184 	 * expected initial value.
4185 	 */
4186 	dev_priv->chv_phy_control =
4187 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
4188 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
4189 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
4190 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
4191 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
4192 
4193 	/*
4194 	 * If all lanes are disabled we leave the override disabled
4195 	 * with all power down bits cleared to match the state we
4196 	 * would use after disabling the port. Otherwise enable the
4197 	 * override and set the lane powerdown bits accding to the
4198 	 * current lane status.
4199 	 */
4200 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
4201 		u32 status = I915_READ(DPLL(PIPE_A));
4202 		unsigned int mask;
4203 
4204 		mask = status & DPLL_PORTB_READY_MASK;
4205 		if (mask == 0xf)
4206 			mask = 0x0;
4207 		else
4208 			dev_priv->chv_phy_control |=
4209 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
4210 
4211 		dev_priv->chv_phy_control |=
4212 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
4213 
4214 		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
4215 		if (mask == 0xf)
4216 			mask = 0x0;
4217 		else
4218 			dev_priv->chv_phy_control |=
4219 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
4220 
4221 		dev_priv->chv_phy_control |=
4222 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
4223 
4224 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
4225 
4226 		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
4227 	} else {
4228 		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
4229 	}
4230 
4231 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
4232 		u32 status = I915_READ(DPIO_PHY_STATUS);
4233 		unsigned int mask;
4234 
4235 		mask = status & DPLL_PORTD_READY_MASK;
4236 
4237 		if (mask == 0xf)
4238 			mask = 0x0;
4239 		else
4240 			dev_priv->chv_phy_control |=
4241 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
4242 
4243 		dev_priv->chv_phy_control |=
4244 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
4245 
4246 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
4247 
4248 		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
4249 	} else {
4250 		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
4251 	}
4252 
4253 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
4254 
4255 	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
4256 		      dev_priv->chv_phy_control);
4257 }
4258 
4259 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
4260 {
4261 	struct i915_power_well *cmn =
4262 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
4263 	struct i915_power_well *disp2d =
4264 		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
4265 
4266 	/* If the display might be already active skip this */
4267 	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
4268 	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
4269 	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
4270 		return;
4271 
4272 	DRM_DEBUG_KMS("toggling display PHY side reset\n");
4273 
4274 	/* cmnlane needs DPLL registers */
4275 	disp2d->desc->ops->enable(dev_priv, disp2d);
4276 
4277 	/*
4278 	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
4279 	 * Need to assert and de-assert PHY SB reset by gating the
4280 	 * common lane power, then un-gating it.
4281 	 * Simply ungating isn't enough to reset the PHY enough to get
4282 	 * ports and lanes running.
4283 	 */
4284 	cmn->desc->ops->disable(dev_priv, cmn);
4285 }
4286 
4287 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
4288 {
4289 	bool ret;
4290 
4291 	vlv_punit_get(dev_priv);
4292 	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
4293 	vlv_punit_put(dev_priv);
4294 
4295 	return ret;
4296 }
4297 
4298 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
4299 {
4300 	WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
4301 	     "VED not power gated\n");
4302 }
4303 
4304 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
4305 {
4306 	static const struct pci_device_id isp_ids[] = {
4307 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
4308 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
4309 		{}
4310 	};
4311 
4312 	WARN(!pci_dev_present(isp_ids) &&
4313 	     !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
4314 	     "ISP not power gated\n");
4315 }
4316 
4317 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
4318 
4319 /**
4320  * intel_power_domains_init_hw - initialize hardware power domain state
4321  * @i915: i915 device instance
4322  * @resume: Called from resume code paths or not
4323  *
4324  * This function initializes the hardware power domain state and enables all
4325  * power wells belonging to the INIT power domain. Power wells in other
4326  * domains (and not in the INIT domain) are referenced or disabled by
4327  * intel_modeset_readout_hw_state(). After that the reference count of each
4328  * power well must match its HW enabled state, see
4329  * intel_power_domains_verify_state().
4330  *
4331  * It will return with power domains disabled (to be enabled later by
4332  * intel_power_domains_enable()) and must be paired with
4333  * intel_power_domains_fini_hw().
4334  */
4335 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
4336 {
4337 	struct i915_power_domains *power_domains = &i915->power_domains;
4338 
4339 	power_domains->initializing = true;
4340 
4341 	if (INTEL_GEN(i915) >= 11) {
4342 		icl_display_core_init(i915, resume);
4343 	} else if (IS_CANNONLAKE(i915)) {
4344 		cnl_display_core_init(i915, resume);
4345 	} else if (IS_GEN9_BC(i915)) {
4346 		skl_display_core_init(i915, resume);
4347 	} else if (IS_GEN9_LP(i915)) {
4348 		bxt_display_core_init(i915, resume);
4349 	} else if (IS_CHERRYVIEW(i915)) {
4350 		mutex_lock(&power_domains->lock);
4351 		chv_phy_control_init(i915);
4352 		mutex_unlock(&power_domains->lock);
4353 		assert_isp_power_gated(i915);
4354 	} else if (IS_VALLEYVIEW(i915)) {
4355 		mutex_lock(&power_domains->lock);
4356 		vlv_cmnlane_wa(i915);
4357 		mutex_unlock(&power_domains->lock);
4358 		assert_ved_power_gated(i915);
4359 		assert_isp_power_gated(i915);
4360 	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
4361 		hsw_assert_cdclk(i915);
4362 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4363 	} else if (IS_IVYBRIDGE(i915)) {
4364 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4365 	}
4366 
4367 	/*
4368 	 * Keep all power wells enabled for any dependent HW access during
4369 	 * initialization and to make sure we keep BIOS enabled display HW
4370 	 * resources powered until display HW readout is complete. We drop
4371 	 * this reference in intel_power_domains_enable().
4372 	 */
4373 	power_domains->wakeref =
4374 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
4375 
4376 	/* Disable power support if the user asked so. */
4377 	if (!i915_modparams.disable_power_well)
4378 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
4379 	intel_power_domains_sync_hw(i915);
4380 
4381 	power_domains->initializing = false;
4382 }
4383 
4384 /**
4385  * intel_power_domains_fini_hw - deinitialize hw power domain state
4386  * @i915: i915 device instance
4387  *
4388  * De-initializes the display power domain HW state. It also ensures that the
4389  * device stays powered up so that the driver can be reloaded.
4390  *
4391  * It must be called with power domains already disabled (after a call to
4392  * intel_power_domains_disable()) and must be paired with
4393  * intel_power_domains_init_hw().
4394  */
4395 void intel_power_domains_fini_hw(struct drm_i915_private *i915)
4396 {
4397 	intel_wakeref_t wakeref __maybe_unused =
4398 		fetch_and_zero(&i915->power_domains.wakeref);
4399 
4400 	/* Remove the refcount we took to keep power well support disabled. */
4401 	if (!i915_modparams.disable_power_well)
4402 		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4403 
4404 	intel_display_power_flush_work_sync(i915);
4405 
4406 	intel_power_domains_verify_state(i915);
4407 
4408 	/* Keep the power well enabled, but cancel its rpm wakeref. */
4409 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
4410 }
4411 
4412 /**
4413  * intel_power_domains_enable - enable toggling of display power wells
4414  * @i915: i915 device instance
4415  *
4416  * Enable the ondemand enabling/disabling of the display power wells. Note that
4417  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
4418  * only at specific points of the display modeset sequence, thus they are not
4419  * affected by the intel_power_domains_enable()/disable() calls. The purpose
4420  * of these function is to keep the rest of power wells enabled until the end
4421  * of display HW readout (which will acquire the power references reflecting
4422  * the current HW state).
4423  */
4424 void intel_power_domains_enable(struct drm_i915_private *i915)
4425 {
4426 	intel_wakeref_t wakeref __maybe_unused =
4427 		fetch_and_zero(&i915->power_domains.wakeref);
4428 
4429 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4430 	intel_power_domains_verify_state(i915);
4431 }
4432 
4433 /**
4434  * intel_power_domains_disable - disable toggling of display power wells
4435  * @i915: i915 device instance
4436  *
4437  * Disable the ondemand enabling/disabling of the display power wells. See
4438  * intel_power_domains_enable() for which power wells this call controls.
4439  */
4440 void intel_power_domains_disable(struct drm_i915_private *i915)
4441 {
4442 	struct i915_power_domains *power_domains = &i915->power_domains;
4443 
4444 	WARN_ON(power_domains->wakeref);
4445 	power_domains->wakeref =
4446 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
4447 
4448 	intel_power_domains_verify_state(i915);
4449 }
4450 
4451 /**
4452  * intel_power_domains_suspend - suspend power domain state
4453  * @i915: i915 device instance
4454  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
4455  *
4456  * This function prepares the hardware power domain state before entering
4457  * system suspend.
4458  *
4459  * It must be called with power domains already disabled (after a call to
4460  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
4461  */
4462 void intel_power_domains_suspend(struct drm_i915_private *i915,
4463 				 enum i915_drm_suspend_mode suspend_mode)
4464 {
4465 	struct i915_power_domains *power_domains = &i915->power_domains;
4466 	intel_wakeref_t wakeref __maybe_unused =
4467 		fetch_and_zero(&power_domains->wakeref);
4468 
4469 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4470 
4471 	/*
4472 	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
4473 	 * support don't manually deinit the power domains. This also means the
4474 	 * CSR/DMC firmware will stay active, it will power down any HW
4475 	 * resources as required and also enable deeper system power states
4476 	 * that would be blocked if the firmware was inactive.
4477 	 */
4478 	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
4479 	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
4480 	    i915->csr.dmc_payload) {
4481 		intel_display_power_flush_work(i915);
4482 		intel_power_domains_verify_state(i915);
4483 		return;
4484 	}
4485 
4486 	/*
4487 	 * Even if power well support was disabled we still want to disable
4488 	 * power wells if power domains must be deinitialized for suspend.
4489 	 */
4490 	if (!i915_modparams.disable_power_well)
4491 		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4492 
4493 	intel_display_power_flush_work(i915);
4494 	intel_power_domains_verify_state(i915);
4495 
4496 	if (INTEL_GEN(i915) >= 11)
4497 		icl_display_core_uninit(i915);
4498 	else if (IS_CANNONLAKE(i915))
4499 		cnl_display_core_uninit(i915);
4500 	else if (IS_GEN9_BC(i915))
4501 		skl_display_core_uninit(i915);
4502 	else if (IS_GEN9_LP(i915))
4503 		bxt_display_core_uninit(i915);
4504 
4505 	power_domains->display_core_suspended = true;
4506 }
4507 
4508 /**
4509  * intel_power_domains_resume - resume power domain state
4510  * @i915: i915 device instance
4511  *
4512  * This function resume the hardware power domain state during system resume.
4513  *
4514  * It will return with power domain support disabled (to be enabled later by
4515  * intel_power_domains_enable()) and must be paired with
4516  * intel_power_domains_suspend().
4517  */
4518 void intel_power_domains_resume(struct drm_i915_private *i915)
4519 {
4520 	struct i915_power_domains *power_domains = &i915->power_domains;
4521 
4522 	if (power_domains->display_core_suspended) {
4523 		intel_power_domains_init_hw(i915, true);
4524 		power_domains->display_core_suspended = false;
4525 	} else {
4526 		WARN_ON(power_domains->wakeref);
4527 		power_domains->wakeref =
4528 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
4529 	}
4530 
4531 	intel_power_domains_verify_state(i915);
4532 }
4533 
4534 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4535 
4536 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
4537 {
4538 	struct i915_power_domains *power_domains = &i915->power_domains;
4539 	struct i915_power_well *power_well;
4540 
4541 	for_each_power_well(i915, power_well) {
4542 		enum intel_display_power_domain domain;
4543 
4544 		DRM_DEBUG_DRIVER("%-25s %d\n",
4545 				 power_well->desc->name, power_well->count);
4546 
4547 		for_each_power_domain(domain, power_well->desc->domains)
4548 			DRM_DEBUG_DRIVER("  %-23s %d\n",
4549 					 intel_display_power_domain_str(domain),
4550 					 power_domains->domain_use_count[domain]);
4551 	}
4552 }
4553 
4554 /**
4555  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
4556  * @i915: i915 device instance
4557  *
4558  * Verify if the reference count of each power well matches its HW enabled
4559  * state and the total refcount of the domains it belongs to. This must be
4560  * called after modeset HW state sanitization, which is responsible for
4561  * acquiring reference counts for any power wells in use and disabling the
4562  * ones left on by BIOS but not required by any active output.
4563  */
4564 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
4565 {
4566 	struct i915_power_domains *power_domains = &i915->power_domains;
4567 	struct i915_power_well *power_well;
4568 	bool dump_domain_info;
4569 
4570 	mutex_lock(&power_domains->lock);
4571 
4572 	verify_async_put_domains_state(power_domains);
4573 
4574 	dump_domain_info = false;
4575 	for_each_power_well(i915, power_well) {
4576 		enum intel_display_power_domain domain;
4577 		int domains_count;
4578 		bool enabled;
4579 
4580 		enabled = power_well->desc->ops->is_enabled(i915, power_well);
4581 		if ((power_well->count || power_well->desc->always_on) !=
4582 		    enabled)
4583 			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
4584 				  power_well->desc->name,
4585 				  power_well->count, enabled);
4586 
4587 		domains_count = 0;
4588 		for_each_power_domain(domain, power_well->desc->domains)
4589 			domains_count += power_domains->domain_use_count[domain];
4590 
4591 		if (power_well->count != domains_count) {
4592 			DRM_ERROR("power well %s refcount/domain refcount mismatch "
4593 				  "(refcount %d/domains refcount %d)\n",
4594 				  power_well->desc->name, power_well->count,
4595 				  domains_count);
4596 			dump_domain_info = true;
4597 		}
4598 	}
4599 
4600 	if (dump_domain_info) {
4601 		static bool dumped;
4602 
4603 		if (!dumped) {
4604 			intel_power_domains_dump_info(i915);
4605 			dumped = true;
4606 		}
4607 	}
4608 
4609 	mutex_unlock(&power_domains->lock);
4610 }
4611 
4612 #else
4613 
4614 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
4615 {
4616 }
4617 
4618 #endif
4619