1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28 
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31 
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34 
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * But even there some manual control at the device level is required.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
51 
52 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
53 					 enum i915_power_well_id power_well_id);
54 
/**
 * intel_display_power_domain_str - textual name of a display power domain
 * @domain: power domain to name
 *
 * Returns a static, human readable string naming @domain, for use in
 * debug and error messages. Unknown values trigger MISSING_CASE() and
 * map to "?".
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
		return "TRANSCODER_EDP_VDSC";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
156 
/* Turn on @power_well via its platform ops and record the HW state. */
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	/*
	 * Mark the well enabled only after the enable op completed, so
	 * that lockless readers (__intel_display_power_is_enabled()) never
	 * see hw_enabled == true for a well that is still powering up.
	 */
	power_well->hw_enabled = true;
}
164 
/* Turn off @power_well via its platform ops, clearing the HW state first. */
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
	/*
	 * Clear hw_enabled before the disable op (mirror of the enable
	 * path), so lockless readers never see an enabled flag for a well
	 * that is in the middle of powering down.
	 */
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}
172 
173 static void intel_power_well_get(struct drm_i915_private *dev_priv,
174 				 struct i915_power_well *power_well)
175 {
176 	if (!power_well->count++)
177 		intel_power_well_enable(dev_priv, power_well);
178 }
179 
180 static void intel_power_well_put(struct drm_i915_private *dev_priv,
181 				 struct i915_power_well *power_well)
182 {
183 	WARN(!power_well->count, "Use count on power well %s is already zero",
184 	     power_well->desc->name);
185 
186 	if (!--power_well->count)
187 		intel_power_well_disable(dev_priv, power_well);
188 }
189 
190 /**
191  * __intel_display_power_is_enabled - unlocked check for a power domain
192  * @dev_priv: i915 device instance
193  * @domain: power domain to check
194  *
195  * This is the unlocked version of intel_display_power_is_enabled() and should
196  * only be used from error capture and recovery code where deadlocks are
197  * possible.
198  *
199  * Returns:
200  * True when the power domain is enabled, false otherwise.
201  */
202 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
203 				      enum intel_display_power_domain domain)
204 {
205 	struct i915_power_well *power_well;
206 	bool is_enabled;
207 
208 	if (dev_priv->runtime_pm.suspended)
209 		return false;
210 
211 	is_enabled = true;
212 
213 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
214 		if (power_well->desc->always_on)
215 			continue;
216 
217 		if (!power_well->hw_enabled) {
218 			is_enabled = false;
219 			break;
220 		}
221 	}
222 
223 	return is_enabled;
224 }
225 
226 /**
227  * intel_display_power_is_enabled - check for a power domain
228  * @dev_priv: i915 device instance
229  * @domain: power domain to check
230  *
231  * This function can be used to check the hw power domain state. It is mostly
232  * used in hardware state readout functions. Everywhere else code should rely
233  * upon explicit power domain reference counting to ensure that the hardware
234  * block is powered up before accessing it.
235  *
236  * Callers must hold the relevant modesetting locks to ensure that concurrent
237  * threads can't disable the power well while the caller tries to read a few
238  * registers.
239  *
240  * Returns:
241  * True when the power domain is enabled, false otherwise.
242  */
243 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
244 				    enum intel_display_power_domain domain)
245 {
246 	struct i915_power_domains *power_domains;
247 	bool ret;
248 
249 	power_domains = &dev_priv->power_domains;
250 
251 	mutex_lock(&power_domains->lock);
252 	ret = __intel_display_power_is_enabled(dev_priv, domain);
253 	mutex_unlock(&power_domains->lock);
254 
255 	return ret;
256 }
257 
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (has_vga) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}

	/* Re-enable the pipe interrupts that were gated by this power well. */
	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
288 
/* Gate the pipe interrupts backed by the well before it is powered down. */
static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
295 
296 
297 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
298 					   struct i915_power_well *power_well)
299 {
300 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
301 	int pw_idx = power_well->desc->hsw.idx;
302 
303 	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
304 	WARN_ON(intel_wait_for_register(dev_priv,
305 					regs->driver,
306 					HSW_PWR_WELL_CTL_STATE(pw_idx),
307 					HSW_PWR_WELL_CTL_STATE(pw_idx),
308 					1));
309 }
310 
311 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
312 				     const struct i915_power_well_regs *regs,
313 				     int pw_idx)
314 {
315 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
316 	u32 ret;
317 
318 	ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
319 	ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
320 	if (regs->kvmr.reg)
321 		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
322 	ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
323 
324 	return ret;
325 }
326 
/*
 * Wait for the well's state bit to clear, or bail out early if some other
 * agent is holding the well on, logging who that is.
 */
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	/* Note: both 'disabled' and 'reqs' are assigned inside the condition. */
	wait_for((disabled = !(I915_READ(regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->desc->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
354 
/* Wait for the fuse distribution status of power gate @pg to assert. */
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
					SKL_FUSE_PG_DIST_STATUS(pg),
					SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
363 
/*
 * Enable a HSW+ style power well: request it via the driver control
 * register, wait for its state bit (and fuse distribution where
 * applicable), apply platform workarounds and restore gated interrupts.
 */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool wait_fuses = power_well->desc->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		/* The PW index -> PG mapping differs on gen11+. */
		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	/* The well's own fuse state is only valid after the enable. */
	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}
407 
408 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
409 				   struct i915_power_well *power_well)
410 {
411 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
412 	int pw_idx = power_well->desc->hsw.idx;
413 	u32 val;
414 
415 	hsw_power_well_pre_disable(dev_priv,
416 				   power_well->desc->hsw.irq_pipe_mask);
417 
418 	val = I915_READ(regs->driver);
419 	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
420 	hsw_wait_for_power_well_disable(dev_priv, power_well);
421 }
422 
423 #define ICL_AUX_PW_TO_PORT(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
424 
/*
 * Enable an ICL combo PHY AUX power well: request the well, enable the
 * AUX lanes, wait for the well and apply Display WA #1178 where needed.
 */
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	/* Enable the AUX lanes before waiting for the well itself. */
	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: icl */
	if (IS_ICELAKE(dev_priv) &&
	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, port)) {
		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}
451 
/*
 * Disable an ICL combo PHY AUX power well, mirroring the enable sequence:
 * disable the AUX lanes first, then drop the request and wait.
 */
static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
	u32 val;

	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
469 
470 #define ICL_AUX_PW_TO_CH(pw_idx)	\
471 	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
472 
473 static void
474 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
475 				 struct i915_power_well *power_well)
476 {
477 	enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
478 	u32 val;
479 
480 	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
481 	val &= ~DP_AUX_CH_CTL_TBT_IO;
482 	if (power_well->desc->hsw.is_tc_tbt)
483 		val |= DP_AUX_CH_CTL_TBT_IO;
484 	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
485 
486 	hsw_power_well_enable(dev_priv, power_well);
487 }
488 
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	/* Both the request and the resulting state bit must be set. */
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will be not restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= I915_READ(regs->bios);

	return (val & mask) == mask;
}
518 
/* Sanity-check the preconditions for entering the DC9 power state. */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
539 
/* Sanity-check the preconditions for leaving the DC9 power state. */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
555 
/*
 * Write @state to DC_STATE_EN, re-writing until the value sticks.
 * DMC has been observed to revert the register, so re-read and retry
 * until 5 consecutive reads match, giving up after 100 rewrites.
 */
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do  {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;	/* restart the stability count */
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}
592 
593 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
594 {
595 	u32 mask;
596 
597 	mask = DC_STATE_EN_UPTO_DC5;
598 	if (INTEL_GEN(dev_priv) >= 11)
599 		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
600 	else if (IS_GEN9_LP(dev_priv))
601 		mask |= DC_STATE_EN_DC9;
602 	else
603 		mask |= DC_STATE_EN_UPTO_DC6;
604 
605 	return mask;
606 }
607 
608 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
609 {
610 	u32 val;
611 
612 	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
613 
614 	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
615 		      dev_priv->csr.dc_state, val);
616 	dev_priv->csr.dc_state = val;
617 }
618 
619 /**
620  * gen9_set_dc_state - set target display C power state
621  * @dev_priv: i915 device instance
622  * @state: target DC power state
623  * - DC_STATE_DISABLE
624  * - DC_STATE_EN_UPTO_DC5
625  * - DC_STATE_EN_UPTO_DC6
626  * - DC_STATE_EN_DC9
627  *
628  * Signal to DMC firmware/HW the target DC power state passed in @state.
629  * DMC/HW can turn off individual display clocks and power rails when entering
630  * a deeper DC power state (higher in number) and turns these back when exiting
631  * that state to a shallower power state (lower in number). The HW will decide
632  * when to actually enter a given state on an on-demand basis, for instance
633  * depending on the active state of display pipes. The state of display
634  * registers backed by affected power rails are saved/restored as needed.
635  *
636  * Based on the above enabling a deeper DC power state is asynchronous wrt.
637  * enabling it. Disabling a deeper power state is synchronous: for instance
638  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
639  * back on and register state is restored. This is guaranteed by the MMIO write
640  * to DC_STATE_EN blocking until the state is restored.
641  */
642 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
643 {
644 	uint32_t val;
645 	uint32_t mask;
646 
647 	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
648 		state &= dev_priv->csr.allowed_dc_mask;
649 
650 	val = I915_READ(DC_STATE_EN);
651 	mask = gen9_dc_mask(dev_priv);
652 	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
653 		      val & mask, state);
654 
655 	/* Check if DMC is ignoring our DC state requests */
656 	if ((val & mask) != dev_priv->csr.dc_state)
657 		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
658 			  dev_priv->csr.dc_state, val & mask);
659 
660 	val &= ~mask;
661 	val |= state;
662 
663 	gen9_write_dc_state(dev_priv, val);
664 
665 	dev_priv->csr.dc_state = val & mask;
666 }
667 
/* Enter the DC9 power state, resetting the power sequencer where needed. */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
682 
/* Leave the DC9 power state and reapply the PPS register unlock WA. */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
693 
/* Warn if the CSR/DMC firmware doesn't appear to be loaded and programmed. */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
701 
/*
 * Find the power well matching @power_well_id on this platform.
 * Never returns NULL; see the fallback comment below.
 */
static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}
722 
/* Sanity-check the preconditions for entering the DC5 power state. */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	/* PG2 must be off before DC5 can be entered. */
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}
736 
/* Allow the HW/DMC to enter states up to DC5. */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
750 
/* Sanity-check the preconditions for entering the DC6 power state. */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
760 
/* Allow the HW/DMC to enter states up to DC6. */
void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
774 
775 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
776 				   struct i915_power_well *power_well)
777 {
778 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
779 	int pw_idx = power_well->desc->hsw.idx;
780 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
781 	u32 bios_req = I915_READ(regs->bios);
782 
783 	/* Take over the request bit if set by BIOS. */
784 	if (bios_req & mask) {
785 		u32 drv_req = I915_READ(regs->driver);
786 
787 		if (!(drv_req & mask))
788 			I915_WRITE(regs->driver, drv_req | mask);
789 		I915_WRITE(regs->bios, bios_req & ~mask);
790 	}
791 }
792 
/* Power well enable hook: initialize the DDI PHY backing this well. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}
798 
/* Power well disable hook: tear down the DDI PHY backing this well. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}
804 
/* Power well status hook: report whether the backing DDI PHY is enabled. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}
810 
811 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
812 {
813 	struct i915_power_well *power_well;
814 
815 	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
816 	if (power_well->count > 0)
817 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
818 
819 	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
820 	if (power_well->count > 0)
821 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
822 
823 	if (IS_GEMINILAKE(dev_priv)) {
824 		power_well = lookup_power_well(dev_priv,
825 					       GLK_DISP_PW_DPIO_CMN_C);
826 		if (power_well->count > 0)
827 			bxt_ddi_phy_verify_state(dev_priv,
828 						 power_well->desc->bxt.phy);
829 	}
830 }
831 
/* The "DC off" well is enabled iff no DC5/DC6 state is currently allowed. */
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}
837 
838 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
839 {
840 	u32 tmp = I915_READ(DBUF_CTL);
841 
842 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
843 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
844 	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
845 }
846 
/*
 * Enable the "DC off" well: disable all DC states, then cross-check that
 * the state DMC restored (cdclk, DBuf, PHYs) matches expectations.
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		icl_combo_phys_init(dev_priv);
}
871 
872 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
873 					   struct i915_power_well *power_well)
874 {
875 	if (!dev_priv->csr.dmc_payload)
876 		return;
877 
878 	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
879 		skl_enable_dc6(dev_priv);
880 	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
881 		gen9_enable_dc5(dev_priv);
882 }
883 
/* No-op sync_hw hook for platforms without SW-controllable power wells. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
888 
/* No-op enable/disable hook for always-on power wells. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
893 
/* Always-on power wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
899 
900 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
901 					 struct i915_power_well *power_well)
902 {
903 	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
904 		i830_enable_pipe(dev_priv, PIPE_A);
905 	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
906 		i830_enable_pipe(dev_priv, PIPE_B);
907 }
908 
/* Turn off both pipes; B before A, the reverse of the enable order. */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
915 
916 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
917 					  struct i915_power_well *power_well)
918 {
919 	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
920 		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
921 }
922 
923 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
924 					  struct i915_power_well *power_well)
925 {
926 	if (power_well->count > 0)
927 		i830_pipes_power_well_enable(dev_priv, power_well);
928 	else
929 		i830_pipes_power_well_disable(dev_priv, power_well);
930 }
931 
/*
 * Request the Punit to power on/gate a VLV/CHV power well and wait for
 * the status register to acknowledge the transition.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	/* Punit accesses are serialized by pcu_lock. */
	mutex_lock(&dev_priv->pcu_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	/* Nothing to do if the well is already in the requested state. */
	if (COND)
		goto out;

	/* RMW only the control bits belonging to this power well. */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
967 
/* ->enable hook: power on a Punit-controlled VLV/CHV power well. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
973 
/* ->disable hook: power gate a Punit-controlled VLV/CHV power well. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
979 
/*
 * Read back the current Punit state of a power well, sanity checking
 * that nobody else is flipping the controls behind our back.
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
1015 
/*
 * Clock gating / memory arbiter / rawclk setup performed whenever the
 * VLV/CHV display power well comes up.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE; /* intentionally drops all other bits */
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	/* rawclk_freq must have been determined before we get here. */
	WARN_ON(dev_priv->rawclk_freq == 0);

	/* NOTE(review): rawclk_freq appears to be in kHz here - confirm */
	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
1042 
/*
 * Display-side (re)initialization done after powering up the VLV display
 * power well (also used by the CHV pipe-A well): enable the ref/CRI
 * clocks, set up clock gating, re-enable display interrupts and, outside
 * of driver init/resume, restore HPD/CRT/VGA/PPS state.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
1091 
/*
 * Display-side teardown done before powering down the VLV display power
 * well: quiesce display interrupts, reset the power sequencer state and
 * switch HPD over to polling (unless we're in late suspend).
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
1107 
/*
 * Power on the display well first, then bring up the display-side state
 * that depends on it.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1115 
/*
 * Tear down the display-side state while the well is still powered,
 * then power it down - the reverse of the enable path.
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1123 
/*
 * Power up the VLV DPIO common lane well and de-assert common lane
 * reset. Must only run with both PLLs disabled (init / resume from S3).
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1145 
/*
 * Assert common lane reset and power down the VLV DPIO common lane well.
 * All PLLs must already be disabled, which is asserted below.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1159 
/* Mask covering every defined power domain bit. */
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

/* True iff all of @bits are set in @val. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1163 
/*
 * Cross-check the CHV DISPLAY_PHY_STATUS register against the state we
 * believe we programmed via chv_phy_control and the DPIO common lane
 * power wells. Purely a consistency assertion: it changes nothing and
 * only logs an error on mismatch.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	/* Ignore the status bits we can't trust yet (see above). */
	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			   phy_status, dev_priv->chv_phy_control);
}
1269 
1270 #undef BITS_SET
1271 
1272 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1273 					   struct i915_power_well *power_well)
1274 {
1275 	enum dpio_phy phy;
1276 	enum pipe pipe;
1277 	uint32_t tmp;
1278 
1279 	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1280 		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1281 
1282 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1283 		pipe = PIPE_A;
1284 		phy = DPIO_PHY0;
1285 	} else {
1286 		pipe = PIPE_C;
1287 		phy = DPIO_PHY1;
1288 	}
1289 
1290 	/* since ref/cri clock was enabled */
1291 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1292 	vlv_set_power_well(dev_priv, power_well, true);
1293 
1294 	/* Poll for phypwrgood signal */
1295 	if (intel_wait_for_register(dev_priv,
1296 				    DISPLAY_PHY_STATUS,
1297 				    PHY_POWERGOOD(phy),
1298 				    PHY_POWERGOOD(phy),
1299 				    1))
1300 		DRM_ERROR("Display PHY %d is not power up\n", phy);
1301 
1302 	mutex_lock(&dev_priv->sb_lock);
1303 
1304 	/* Enable dynamic power down */
1305 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1306 	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1307 		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1308 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1309 
1310 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1311 		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1312 		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1313 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1314 	} else {
1315 		/*
1316 		 * Force the non-existing CL2 off. BXT does this
1317 		 * too, so maybe it saves some power even though
1318 		 * CL2 doesn't exist?
1319 		 */
1320 		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1321 		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1322 		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1323 	}
1324 
1325 	mutex_unlock(&dev_priv->sb_lock);
1326 
1327 	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1328 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1329 
1330 	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1331 		      phy, dev_priv->chv_phy_control);
1332 
1333 	assert_chv_phy_status(dev_priv);
1334 }
1335 
/*
 * Power down a CHV DPIO common lane power well after asserting the
 * common lane reset. The PLLs fed by the PHY must already be off.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1366 
/*
 * Verify that the PHY's reported per-lane power down state matches what
 * we expect given the power down override setting and lane mask.
 * Assertion only - logs a WARN on mismatch, changes nothing.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* Some but not all lanes overridden on: only "any" is down. */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* Extract the two power down status bits for this channel. */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1428 
/*
 * chv_phy_powergate_ch - toggle the power down override for a whole PHY channel
 *
 * Updates the PHY_CH_POWER_DOWN_OVRD_EN bit for @phy/@ch in
 * DISPLAY_PHY_CONTROL under the power domains lock.
 *
 * Returns the previous override state so the caller can restore it later.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1459 
/*
 * chv_phy_powergate_lanes - program the per-lane power down override mask
 * for the PHY channel driving @encoder, and enable/disable the override
 * depending on @override.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* Replace the old lane mask for this channel with @mask. */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1489 
/*
 * Read back whether the CHV pipe-A power well is powered on from the
 * Punit DSPFREQ register, sanity checking the control bits as we go.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	/* The control field sits 16 bits below the status field. */
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
1518 
/*
 * Request the Punit to power on/gate the CHV pipe-A power well via the
 * DSPFREQ register and wait for the status field to acknowledge it.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->pcu_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	/* Nothing to do if already in the requested state. */
	if (COND)
		goto out;

	/* RMW only this pipe's control bits. */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
1552 
/*
 * Power on the CHV pipe-A well first, then bring up the display-side
 * state that depends on it.
 */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1560 
/*
 * Tear down the display-side state while the well is still powered,
 * then power it down - the reverse of the enable path.
 */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1568 
/*
 * Take a reference on every power well backing @domain, powering the
 * wells up as needed, and bump the domain use count. Caller must hold
 * power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
1581 
1582 /**
1583  * intel_display_power_get - grab a power domain reference
1584  * @dev_priv: i915 device instance
1585  * @domain: power domain to reference
1586  *
1587  * This function grabs a power domain reference for @domain and ensures that the
1588  * power domain and all its parents are powered up. Therefore users should only
1589  * grab a reference to the innermost power domain they need.
1590  *
1591  * Any power domain reference obtained by this function must have a symmetric
1592  * call to intel_display_power_put() to release the reference again.
1593  */
1594 void intel_display_power_get(struct drm_i915_private *dev_priv,
1595 			     enum intel_display_power_domain domain)
1596 {
1597 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1598 
1599 	intel_runtime_pm_get(dev_priv);
1600 
1601 	mutex_lock(&power_domains->lock);
1602 
1603 	__intel_display_power_get_domain(dev_priv, domain);
1604 
1605 	mutex_unlock(&power_domains->lock);
1606 }
1607 
1608 /**
1609  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1610  * @dev_priv: i915 device instance
1611  * @domain: power domain to reference
1612  *
1613  * This function grabs a power domain reference for @domain and ensures that the
1614  * power domain and all its parents are powered up. Therefore users should only
1615  * grab a reference to the innermost power domain they need.
1616  *
1617  * Any power domain reference obtained by this function must have a symmetric
1618  * call to intel_display_power_put() to release the reference again.
1619  */
1620 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1621 					enum intel_display_power_domain domain)
1622 {
1623 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1624 	bool is_enabled;
1625 
1626 	if (!intel_runtime_pm_get_if_in_use(dev_priv))
1627 		return false;
1628 
1629 	mutex_lock(&power_domains->lock);
1630 
1631 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1632 		__intel_display_power_get_domain(dev_priv, domain);
1633 		is_enabled = true;
1634 	} else {
1635 		is_enabled = false;
1636 	}
1637 
1638 	mutex_unlock(&power_domains->lock);
1639 
1640 	if (!is_enabled)
1641 		intel_runtime_pm_put(dev_priv);
1642 
1643 	return is_enabled;
1644 }
1645 
1646 /**
1647  * intel_display_power_put - release a power domain reference
1648  * @dev_priv: i915 device instance
1649  * @domain: power domain to reference
1650  *
1651  * This function drops the power domain reference obtained by
1652  * intel_display_power_get() and might power down the corresponding hardware
1653  * block right away if this is the last reference.
1654  */
1655 void intel_display_power_put(struct drm_i915_private *dev_priv,
1656 			     enum intel_display_power_domain domain)
1657 {
1658 	struct i915_power_domains *power_domains;
1659 	struct i915_power_well *power_well;
1660 
1661 	power_domains = &dev_priv->power_domains;
1662 
1663 	mutex_lock(&power_domains->lock);
1664 
1665 	WARN(!power_domains->domain_use_count[domain],
1666 	     "Use count on domain %s is already zero\n",
1667 	     intel_display_power_domain_str(domain));
1668 	power_domains->domain_use_count[domain]--;
1669 
1670 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
1671 		intel_power_well_put(dev_priv, power_well);
1672 
1673 	mutex_unlock(&power_domains->lock);
1674 
1675 	intel_runtime_pm_put(dev_priv);
1676 }
1677 
1678 #define I830_PIPES_POWER_DOMAINS (		\
1679 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
1680 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
1681 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1682 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1683 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
1684 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
1685 	BIT_ULL(POWER_DOMAIN_INIT))
1686 
1687 #define VLV_DISPLAY_POWER_DOMAINS (		\
1688 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
1689 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
1690 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1691 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1692 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
1693 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
1694 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1695 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1696 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
1697 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
1698 	BIT_ULL(POWER_DOMAIN_VGA) |			\
1699 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
1700 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1701 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1702 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
1703 	BIT_ULL(POWER_DOMAIN_INIT))
1704 
1705 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
1706 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1707 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1708 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
1709 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1710 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1711 	BIT_ULL(POWER_DOMAIN_INIT))
1712 
1713 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
1714 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1715 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1716 	BIT_ULL(POWER_DOMAIN_INIT))
1717 
1718 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
1719 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1720 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1721 	BIT_ULL(POWER_DOMAIN_INIT))
1722 
1723 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
1724 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1725 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1726 	BIT_ULL(POWER_DOMAIN_INIT))
1727 
1728 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
1729 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1730 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1731 	BIT_ULL(POWER_DOMAIN_INIT))
1732 
1733 #define CHV_DISPLAY_POWER_DOMAINS (		\
1734 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
1735 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
1736 	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
1737 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1738 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1739 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
1740 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
1741 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
1742 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
1743 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1744 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1745 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
1746 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
1747 	BIT_ULL(POWER_DOMAIN_VGA) |			\
1748 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
1749 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1750 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1751 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
1752 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
1753 	BIT_ULL(POWER_DOMAIN_INIT))
1754 
1755 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
1756 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1757 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1758 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1759 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1760 	BIT_ULL(POWER_DOMAIN_INIT))
1761 
1762 #define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
1763 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
1764 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
1765 	BIT_ULL(POWER_DOMAIN_INIT))
1766 
1767 #define HSW_DISPLAY_POWER_DOMAINS (			\
1768 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1769 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1770 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
1771 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1772 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1773 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1774 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1775 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1776 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1777 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1778 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1779 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
1780 	BIT_ULL(POWER_DOMAIN_VGA) |				\
1781 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1782 	BIT_ULL(POWER_DOMAIN_INIT))
1783 
1784 #define BDW_DISPLAY_POWER_DOMAINS (			\
1785 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1786 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1787 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1788 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1789 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1790 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1791 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1792 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1793 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1794 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1795 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
1796 	BIT_ULL(POWER_DOMAIN_VGA) |				\
1797 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1798 	BIT_ULL(POWER_DOMAIN_INIT))
1799 
/*
 * Domains gated by SKL power well 2: pipes B/C with their transcoders and
 * panel fitters, the DDI B-E lanes, AUX B-D, audio and VGA.
 */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
/* SKL DDI IO wells: one per DDI, A and E sharing a single well. */
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * Domains whose use must keep DC states disabled ("DC off" well): all of
 * power well 2 plus GT interrupts, modeset and AUX A.
 */
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1837 
/*
 * Domains gated by BXT power well 2: pipes B/C, transcoders A-C, DDI B/C
 * lanes and AUX B/C (BXT has no DDI D/E), audio and VGA.
 */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * "DC off" domains on BXT: power well 2 domains plus GT interrupts,
 * modeset, AUX A and GMBUS.
 */
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* DPIO PHY common lane wells: PHY for DDI A and shared PHY for DDI B/C. */
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1870 
/*
 * Domains gated by GLK power well 2; same set as BXT (pipes B/C,
 * transcoders A-C, DDI B/C lanes, AUX B/C, audio, VGA).
 */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Per-DDI IO wells (no POWER_DOMAIN_INIT bit for these on GLK). */
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
/* DPIO PHY common lane wells, one per DDI on GLK. */
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* AUX channel wells; AUX A additionally covers the AUX IO A domain. */
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * "DC off" domains on GLK: power well 2 domains plus GT interrupts,
 * modeset, AUX A and GMBUS.
 */
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1921 
/*
 * Domains gated by CNL power well 2: pipes B/C, transcoders A-C, DDI
 * B/C/D/F lanes, AUX B/C/D/F, audio and VGA. There is no DDI/AUX E here.
 */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Per-DDI IO wells. */
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* AUX channel wells; AUX A additionally covers the AUX IO A domain. */
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * "DC off" domains on CNL: power well 2 domains plus GT interrupts,
 * modeset and AUX A.
 */
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1978 
1979 /*
1980  * ICL PW_0/PG_0 domains (HW/DMC control):
1981  * - PCI
1982  * - clocks except port PLL
1983  * - central power except FBC
1984  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
1985  * ICL PW_1/PG_1 domains (HW/DMC control):
1986  * - DBUF function
1987  * - PIPE_A and its planes, except VGA
1988  * - transcoder EDP + PSR
1989  * - transcoder DSI
1990  * - DDI_A
1991  * - FBC
1992  */
/*
 * ICL power wells are nested: PW4 covers pipe C only, PW3 adds pipe B,
 * transcoders, DDI lanes/IO and AUX channels on top of PW4, and PW2 adds
 * the eDP transcoder VDSC on top of PW3.
 */
#define ICL_PW_4_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/* VDSC/joining */
#define ICL_PW_3_POWER_DOMAINS (			\
	ICL_PW_4_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - transcoder WD
	 * - KVMR (HW control)
	 */
#define ICL_PW_2_POWER_DOMAINS (			\
	ICL_PW_3_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - KVMR (HW control)
	 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	ICL_PW_2_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Per-DDI IO wells, single-bit masks. */
#define ICL_DDI_IO_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

/* AUX wells; AUX A additionally covers the AUX IO A domain. */
#define ICL_AUX_A_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_F))
/* Thunderbolt AUX wells. */
#define ICL_AUX_TBT1_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
#define ICL_AUX_TBT2_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
#define ICL_AUX_TBT3_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
#define ICL_AUX_TBT4_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2078 
/*
 * Ops for power wells the driver never toggles: enable/disable/sync_hw
 * are no-ops, only the is_enabled() query does anything.
 */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* Ops for the CHV pipe-A power well (controlled via display registers). */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/*
 * Ops for the CHV DPIO common lane wells; state is queried the same way
 * as the other VLV/CHV punit-controlled wells.
 */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2099 
/* Pre-HSW platforms: a single always-on well covering every domain. */
static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};

/* Ops for the i830 "pipes" well, which has real sync/enable hooks. */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

/* i830: the always-on well plus a dedicated well for the pipes. */
static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
2132 
/* Ops for HSW-style wells driven through the PWR_WELL_CTL registers. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* Ops for the gen9+ virtual "DC off" well controlling DC state use. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

/* Ops for the BXT/GLK DPIO PHY common lane wells. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

/*
 * The four HSW power well control registers: BIOS, driver, KVMR and debug
 * request registers, each with its own set of request/state bits.
 */
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios	= HSW_PWR_WELL_CTL1,
	.driver	= HSW_PWR_WELL_CTL2,
	.kvmr	= HSW_PWR_WELL_CTL3,
	.debug	= HSW_PWR_WELL_CTL4,
};
2160 
2161 static const struct i915_power_well_desc hsw_power_wells[] = {
2162 	{
2163 		.name = "always-on",
2164 		.always_on = true,
2165 		.domains = POWER_DOMAIN_MASK,
2166 		.ops = &i9xx_always_on_power_well_ops,
2167 		.id = DISP_PW_ID_NONE,
2168 	},
2169 	{
2170 		.name = "display",
2171 		.domains = HSW_DISPLAY_POWER_DOMAINS,
2172 		.ops = &hsw_power_well_ops,
2173 		.id = HSW_DISP_PW_GLOBAL,
2174 		{
2175 			.hsw.regs = &hsw_power_well_regs,
2176 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2177 			.hsw.has_vga = true,
2178 		},
2179 	},
2180 };
2181 
2182 static const struct i915_power_well_desc bdw_power_wells[] = {
2183 	{
2184 		.name = "always-on",
2185 		.always_on = true,
2186 		.domains = POWER_DOMAIN_MASK,
2187 		.ops = &i9xx_always_on_power_well_ops,
2188 		.id = DISP_PW_ID_NONE,
2189 	},
2190 	{
2191 		.name = "display",
2192 		.domains = BDW_DISPLAY_POWER_DOMAINS,
2193 		.ops = &hsw_power_well_ops,
2194 		.id = HSW_DISP_PW_GLOBAL,
2195 		{
2196 			.hsw.regs = &hsw_power_well_regs,
2197 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2198 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2199 			.hsw.has_vga = true,
2200 		},
2201 	},
2202 };
2203 
/* Ops for the VLV DISP2D display well; state queried via the punit. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Ops for the VLV DPIO common lane well. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic ops for the remaining VLV punit-controlled wells. */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2224 
/*
 * VLV power wells. Note that each dpio-tx-* well lists the union of all
 * four TX lane domain sets, so grabbing any lane domain powers up all of
 * the TX lane wells.
 */
static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};
2300 
/* CHV power wells: always-on, the pipe-A/display well, and two DPIO PHYs. */
static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};
2339 
2340 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2341 					 enum i915_power_well_id power_well_id)
2342 {
2343 	struct i915_power_well *power_well;
2344 	bool ret;
2345 
2346 	power_well = lookup_power_well(dev_priv, power_well_id);
2347 	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
2348 
2349 	return ret;
2350 }
2351 
/*
 * SKL power wells. PW1 and MISC IO are flagged always-on because the DMC
 * firmware manages them; the driver only toggles DC off, PW2 and the
 * per-DDI IO wells.
 */
static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
};
2445 
/*
 * BXT power wells. PW1 is DMC-managed (always-on); the DPIO PHY wells use
 * the dedicated bxt_dpio_cmn ops rather than the HSW register interface.
 */
static const struct i915_power_well_desc bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};
2505 
/*
 * GLK power wells. Adds separate AUX wells and a third DPIO PHY compared
 * to BXT; DDI B/C reuse the SKL control-register indices.
 */
static const struct i915_power_well_desc glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DISP_PW_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
};
2634 
/*
 * CNL power wells. Adds AUX D/F and DDI D/F wells; DDI B/C/D reuse the
 * SKL control-register indices, DDI A and AUX A-C reuse the GLK ones.
 */
static const struct i915_power_well_desc cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
		},
	},
};
2776 
/* Ops for ICL combo PHY AUX wells, with PHY-specific enable/disable. */
static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_combo_phy_aux_power_well_enable,
	.disable = icl_combo_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/*
 * Ops for ICL Type-C PHY AUX wells: only enable is TC-specific, disable
 * falls back to the generic HSW-style path.
 */
static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_tc_phy_aux_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* ICL AUX well control registers (no KVMR register on this set). */
static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_AUX1,
	.driver	= ICL_PWR_WELL_CTL_AUX2,
	.debug	= ICL_PWR_WELL_CTL_AUX4,
};

/* ICL DDI IO well control registers (no KVMR register on this set). */
static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_DDI1,
	.driver	= ICL_PWR_WELL_CTL_DDI2,
	.debug	= ICL_PWR_WELL_CTL_DDI4,
};
2802 
/*
 * ICL display power well descriptors. Array order matters: wells are
 * enabled from lower to higher index and disabled in reverse (see
 * intel_power_domains_init()). Wells with .id == DISP_PW_ID_NONE cannot be
 * looked up by ID; PW1/PW2 keep their SKL IDs so existing
 * lookup_power_well() callers continue to work.
 */
static const struct i915_power_well_desc icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	/* DDI IO wells use the dedicated ICL DDI control registers. */
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
		},
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
		},
	},
	/* AUX A/B sit on combo PHY ports, hence the combo PHY AUX ops. */
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	/*
	 * AUX C..F use the Type-C PHY AUX ops in native (non-Thunderbolt)
	 * mode; the TBT wells below are the same ops with is_tc_tbt set.
	 */
	{
		.name = "AUX C",
		.domains = ICL_AUX_C_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX D",
		.domains = ICL_AUX_D_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E",
		.domains = ICL_AUX_E_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F",
		.domains = ICL_AUX_F_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX TBT1",
		.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT2",
		.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT3",
		.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT4",
		.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};
3035 
/*
 * Sanitize the i915.disable_power_well module option: any explicit
 * (non-negative) user value is collapsed to 0/1, while the "auto"
 * default (-1) resolves to 1, i.e. power well toggling enabled.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	/* -1 means "auto": default to allowing power well toggling. */
	if (disable_power_well < 0)
		return 1;

	return disable_power_well != 0;
}
3045 
3046 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3047 				    int enable_dc)
3048 {
3049 	uint32_t mask;
3050 	int requested_dc;
3051 	int max_dc;
3052 
3053 	if (INTEL_GEN(dev_priv) >= 11) {
3054 		max_dc = 2;
3055 		/*
3056 		 * DC9 has a separate HW flow from the rest of the DC states,
3057 		 * not depending on the DMC firmware. It's needed by system
3058 		 * suspend/resume, so allow it unconditionally.
3059 		 */
3060 		mask = DC_STATE_EN_DC9;
3061 	} else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
3062 		max_dc = 2;
3063 		mask = 0;
3064 	} else if (IS_GEN9_LP(dev_priv)) {
3065 		max_dc = 1;
3066 		mask = DC_STATE_EN_DC9;
3067 	} else {
3068 		max_dc = 0;
3069 		mask = 0;
3070 	}
3071 
3072 	if (!i915_modparams.disable_power_well)
3073 		max_dc = 0;
3074 
3075 	if (enable_dc >= 0 && enable_dc <= max_dc) {
3076 		requested_dc = enable_dc;
3077 	} else if (enable_dc == -1) {
3078 		requested_dc = max_dc;
3079 	} else if (enable_dc > max_dc && enable_dc <= 2) {
3080 		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3081 			      enable_dc, max_dc);
3082 		requested_dc = max_dc;
3083 	} else {
3084 		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3085 		requested_dc = max_dc;
3086 	}
3087 
3088 	if (requested_dc > 1)
3089 		mask |= DC_STATE_EN_UPTO_DC6;
3090 	if (requested_dc > 0)
3091 		mask |= DC_STATE_EN_UPTO_DC5;
3092 
3093 	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
3094 
3095 	return mask;
3096 }
3097 
3098 static int
3099 __set_power_wells(struct i915_power_domains *power_domains,
3100 		  const struct i915_power_well_desc *power_well_descs,
3101 		  int power_well_count)
3102 {
3103 	u64 power_well_ids = 0;
3104 	int i;
3105 
3106 	power_domains->power_well_count = power_well_count;
3107 	power_domains->power_wells =
3108 				kcalloc(power_well_count,
3109 					sizeof(*power_domains->power_wells),
3110 					GFP_KERNEL);
3111 	if (!power_domains->power_wells)
3112 		return -ENOMEM;
3113 
3114 	for (i = 0; i < power_well_count; i++) {
3115 		enum i915_power_well_id id = power_well_descs[i].id;
3116 
3117 		power_domains->power_wells[i].desc = &power_well_descs[i];
3118 
3119 		if (id == DISP_PW_ID_NONE)
3120 			continue;
3121 
3122 		WARN_ON(id >= sizeof(power_well_ids) * 8);
3123 		WARN_ON(power_well_ids & BIT_ULL(id));
3124 		power_well_ids |= BIT_ULL(id);
3125 	}
3126 
3127 	return 0;
3128 }
3129 
/*
 * Convenience wrapper for __set_power_wells() that derives the descriptor
 * count from a statically sized descriptor array.
 */
#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))
3133 
3134 /**
3135  * intel_power_domains_init - initializes the power domain structures
3136  * @dev_priv: i915 device instance
3137  *
3138  * Initializes the power domain structures for @dev_priv depending upon the
3139  * supported platform.
3140  */
3141 int intel_power_domains_init(struct drm_i915_private *dev_priv)
3142 {
3143 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3144 	int err;
3145 
3146 	i915_modparams.disable_power_well =
3147 		sanitize_disable_power_well_option(dev_priv,
3148 						   i915_modparams.disable_power_well);
3149 	dev_priv->csr.allowed_dc_mask =
3150 		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
3151 
3152 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
3153 
3154 	mutex_init(&power_domains->lock);
3155 
3156 	/*
3157 	 * The enabling order will be from lower to higher indexed wells,
3158 	 * the disabling order is reversed.
3159 	 */
3160 	if (IS_ICELAKE(dev_priv)) {
3161 		err = set_power_wells(power_domains, icl_power_wells);
3162 	} else if (IS_CANNONLAKE(dev_priv)) {
3163 		err = set_power_wells(power_domains, cnl_power_wells);
3164 
3165 		/*
3166 		 * DDI and Aux IO are getting enabled for all ports
3167 		 * regardless the presence or use. So, in order to avoid
3168 		 * timeouts, lets remove them from the list
3169 		 * for the SKUs without port F.
3170 		 */
3171 		if (!IS_CNL_WITH_PORT_F(dev_priv))
3172 			power_domains->power_well_count -= 2;
3173 	} else if (IS_GEMINILAKE(dev_priv)) {
3174 		err = set_power_wells(power_domains, glk_power_wells);
3175 	} else if (IS_BROXTON(dev_priv)) {
3176 		err = set_power_wells(power_domains, bxt_power_wells);
3177 	} else if (IS_GEN9_BC(dev_priv)) {
3178 		err = set_power_wells(power_domains, skl_power_wells);
3179 	} else if (IS_CHERRYVIEW(dev_priv)) {
3180 		err = set_power_wells(power_domains, chv_power_wells);
3181 	} else if (IS_BROADWELL(dev_priv)) {
3182 		err = set_power_wells(power_domains, bdw_power_wells);
3183 	} else if (IS_HASWELL(dev_priv)) {
3184 		err = set_power_wells(power_domains, hsw_power_wells);
3185 	} else if (IS_VALLEYVIEW(dev_priv)) {
3186 		err = set_power_wells(power_domains, vlv_power_wells);
3187 	} else if (IS_I830(dev_priv)) {
3188 		err = set_power_wells(power_domains, i830_power_wells);
3189 	} else {
3190 		err = set_power_wells(power_domains, i9xx_always_on_power_well);
3191 	}
3192 
3193 	return err;
3194 }
3195 
3196 /**
3197  * intel_power_domains_cleanup - clean up power domains resources
3198  * @dev_priv: i915 device instance
3199  *
3200  * Release any resources acquired by intel_power_domains_init()
3201  */
3202 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
3203 {
3204 	kfree(dev_priv->power_domains.power_wells);
3205 }
3206 
3207 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
3208 {
3209 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3210 	struct i915_power_well *power_well;
3211 
3212 	mutex_lock(&power_domains->lock);
3213 	for_each_power_well(dev_priv, power_well) {
3214 		power_well->desc->ops->sync_hw(dev_priv, power_well);
3215 		power_well->hw_enabled =
3216 			power_well->desc->ops->is_enabled(dev_priv, power_well);
3217 	}
3218 	mutex_unlock(&power_domains->lock);
3219 }
3220 
3221 static inline
3222 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
3223 			  i915_reg_t reg, bool enable)
3224 {
3225 	u32 val, status;
3226 
3227 	val = I915_READ(reg);
3228 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
3229 	I915_WRITE(reg, val);
3230 	POSTING_READ(reg);
3231 	udelay(10);
3232 
3233 	status = I915_READ(reg) & DBUF_POWER_STATE;
3234 	if ((enable && !status) || (!enable && status)) {
3235 		DRM_ERROR("DBus power %s timeout!\n",
3236 			  enable ? "enable" : "disable");
3237 		return false;
3238 	}
3239 	return true;
3240 }
3241 
/* Power up the DBUF via DBUF_CTL; errors are logged by the helper. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
}
3246 
/* Power down the DBUF via DBUF_CTL; errors are logged by the helper. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
}
3251 
3252 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3253 {
3254 	if (INTEL_GEN(dev_priv) < 11)
3255 		return 1;
3256 	return 2;
3257 }
3258 
3259 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3260 			    u8 req_slices)
3261 {
3262 	const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
3263 	bool ret;
3264 
3265 	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
3266 		DRM_ERROR("Invalid number of dbuf slices requested\n");
3267 		return;
3268 	}
3269 
3270 	if (req_slices == hw_enabled_slices || req_slices == 0)
3271 		return;
3272 
3273 	if (req_slices > hw_enabled_slices)
3274 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3275 	else
3276 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3277 
3278 	if (ret)
3279 		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
3280 }
3281 
/*
 * Power up both ICL DBUF slices, verify the status bits after a short
 * delay and cache the enabled slice count on success.
 */
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	/* Give the HW time to act on both requests. */
	udelay(10);

	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
	else
		dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
}
3296 
/*
 * Power down both ICL DBUF slices, verify the status bits after a short
 * delay and clear the cached enabled slice count on success.
 */
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	/* Give the HW time to act on both requests. */
	udelay(10);

	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power disable timeout!\n");
	else
		dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
}
3311 
3312 static void icl_mbus_init(struct drm_i915_private *dev_priv)
3313 {
3314 	uint32_t val;
3315 
3316 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
3317 	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
3318 	      MBUS_ABOX_B_CREDIT(1) |
3319 	      MBUS_ABOX_BW_CREDIT(1);
3320 
3321 	I915_WRITE(MBUS_ABOX_CTL, val);
3322 }
3323 
3324 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
3325 				      bool enable)
3326 {
3327 	i915_reg_t reg;
3328 	u32 reset_bits, val;
3329 
3330 	if (IS_IVYBRIDGE(dev_priv)) {
3331 		reg = GEN7_MSG_CTL;
3332 		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
3333 	} else {
3334 		reg = HSW_NDE_RSTWRN_OPT;
3335 		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
3336 	}
3337 
3338 	val = I915_READ(reg);
3339 
3340 	if (enable)
3341 		val |= reset_bits;
3342 	else
3343 		val &= ~reset_bits;
3344 
3345 	I915_WRITE(reg, val);
3346 }
3347 
/*
 * Bring up the SKL display core: disable DC states, enable the PCH reset
 * handshake, power up PG1 and Misc I/O, then CDCLK and DBUF. On resume,
 * reload the DMC firmware if a payload is available. The step order
 * matters; keep it in sync with skl_display_core_uninit().
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3377 
/*
 * Tear down the SKL display core in the reverse order of
 * skl_display_core_init(): DC states off, DBUF off, CDCLK off, then drop
 * the driver's request for power well 1.
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
3407 
/*
 * Bring up the BXT display core: disable DC states, force the PCH reset
 * handshake off (no PCH on BXT), power up PG1, then CDCLK and DBUF. On
 * resume, reload the DMC firmware if a payload is available.
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3439 
/*
 * Tear down the BXT display core in the reverse order of
 * bxt_display_core_init(): DC states off, DBUF off, CDCLK off, then drop
 * the driver's request for power well 1.
 */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
3467 
/*
 * CNL display initialization sequence; the numbered comments follow the
 * steps of the documented sequence. On resume, reload the DMC firmware if
 * a payload is available.
 */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2-3. Initialize combo PHYs */
	cnl_combo_phys_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	cnl_init_cdclk(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3499 
/*
 * CNL display uninitialization sequence, mirroring cnl_display_core_init()
 * in reverse.
 */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	cnl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */

	/* 5. Uninitialize combo PHYs */
	cnl_combo_phys_uninit(dev_priv);
}
3530 
/*
 * ICL display initialization sequence; the numbered comments follow the
 * steps of the documented sequence. On resume, reload the DMC firmware if
 * a payload is available.
 */
void icl_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2-3. Initialize combo PHYs */
	icl_combo_phys_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CDCLK. */
	icl_init_cdclk(dev_priv);

	/* 6. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 7. Setup MBUS. */
	icl_mbus_init(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3566 
/*
 * ICL display uninitialization sequence, mirroring icl_display_core_init()
 * in reverse.
 */
void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	icl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Uninitialize combo PHYs */
	icl_combo_phys_uninit(dev_priv);
}
3595 
/*
 * Reconstruct the shadow copy of DISPLAY_PHY_CONTROL from the current
 * power well and lane status, then write it back. Also records per-PHY
 * whether the PHY status assertions may be performed (chv_phy_assert[]).
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		/* PHY0/CH0 (port B) lane status. */
		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		/* PHY0/CH1 (port C) lane status. */
		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		/* PHY0 is off; skip status assertions on it. */
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		/* PHY1/CH0 (port D) lane status. */
		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		/* PHY1 is off; skip status assertions on it. */
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
3682 
/*
 * VLV common lane workaround: toggle the display PHY side reset by power
 * gating the common lane well, unless the display is already active with
 * the common lane reset deasserted.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}
3710 
3711 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
3712 
3713 /**
3714  * intel_power_domains_init_hw - initialize hardware power domain state
3715  * @dev_priv: i915 device instance
3716  * @resume: Called from resume code paths or not
3717  *
3718  * This function initializes the hardware power domain state and enables all
3719  * power wells belonging to the INIT power domain. Power wells in other
3720  * domains (and not in the INIT domain) are referenced or disabled by
3721  * intel_modeset_readout_hw_state(). After that the reference count of each
3722  * power well must match its HW enabled state, see
3723  * intel_power_domains_verify_state().
3724  *
3725  * It will return with power domains disabled (to be enabled later by
3726  * intel_power_domains_enable()) and must be paired with
3727  * intel_power_domains_fini_hw().
3728  */
3729 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
3730 {
3731 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3732 
3733 	power_domains->initializing = true;
3734 
3735 	if (IS_ICELAKE(dev_priv)) {
3736 		icl_display_core_init(dev_priv, resume);
3737 	} else if (IS_CANNONLAKE(dev_priv)) {
3738 		cnl_display_core_init(dev_priv, resume);
3739 	} else if (IS_GEN9_BC(dev_priv)) {
3740 		skl_display_core_init(dev_priv, resume);
3741 	} else if (IS_GEN9_LP(dev_priv)) {
3742 		bxt_display_core_init(dev_priv, resume);
3743 	} else if (IS_CHERRYVIEW(dev_priv)) {
3744 		mutex_lock(&power_domains->lock);
3745 		chv_phy_control_init(dev_priv);
3746 		mutex_unlock(&power_domains->lock);
3747 	} else if (IS_VALLEYVIEW(dev_priv)) {
3748 		mutex_lock(&power_domains->lock);
3749 		vlv_cmnlane_wa(dev_priv);
3750 		mutex_unlock(&power_domains->lock);
3751 	} else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
3752 		intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3753 
3754 	/*
3755 	 * Keep all power wells enabled for any dependent HW access during
3756 	 * initialization and to make sure we keep BIOS enabled display HW
3757 	 * resources powered until display HW readout is complete. We drop
3758 	 * this reference in intel_power_domains_enable().
3759 	 */
3760 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3761 	/* Disable power support if the user asked so. */
3762 	if (!i915_modparams.disable_power_well)
3763 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3764 	intel_power_domains_sync_hw(dev_priv);
3765 
3766 	power_domains->initializing = false;
3767 }
3768 
3769 /**
3770  * intel_power_domains_fini_hw - deinitialize hw power domain state
3771  * @dev_priv: i915 device instance
3772  *
3773  * De-initializes the display power domain HW state. It also ensures that the
3774  * device stays powered up so that the driver can be reloaded.
3775  *
3776  * It must be called with power domains already disabled (after a call to
3777  * intel_power_domains_disable()) and must be paired with
3778  * intel_power_domains_init_hw().
3779  */
3780 void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
3781 {
3782 	/* Keep the power well enabled, but cancel its rpm wakeref. */
3783 	intel_runtime_pm_put(dev_priv);
3784 
3785 	/* Remove the refcount we took to keep power well support disabled. */
3786 	if (!i915_modparams.disable_power_well)
3787 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3788 
3789 	intel_power_domains_verify_state(dev_priv);
3790 }
3791 
3792 /**
3793  * intel_power_domains_enable - enable toggling of display power wells
3794  * @dev_priv: i915 device instance
3795  *
3796  * Enable the ondemand enabling/disabling of the display power wells. Note that
3797  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
3798  * only at specific points of the display modeset sequence, thus they are not
3799  * affected by the intel_power_domains_enable()/disable() calls. The purpose
3800  * of these function is to keep the rest of power wells enabled until the end
3801  * of display HW readout (which will acquire the power references reflecting
3802  * the current HW state).
3803  */
3804 void intel_power_domains_enable(struct drm_i915_private *dev_priv)
3805 {
3806 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3807 
3808 	intel_power_domains_verify_state(dev_priv);
3809 }
3810 
3811 /**
3812  * intel_power_domains_disable - disable toggling of display power wells
3813  * @dev_priv: i915 device instance
3814  *
3815  * Disable the ondemand enabling/disabling of the display power wells. See
3816  * intel_power_domains_enable() for which power wells this call controls.
3817  */
3818 void intel_power_domains_disable(struct drm_i915_private *dev_priv)
3819 {
3820 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3821 
3822 	intel_power_domains_verify_state(dev_priv);
3823 }
3824 
3825 /**
3826  * intel_power_domains_suspend - suspend power domain state
3827  * @dev_priv: i915 device instance
3828  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
3829  *
3830  * This function prepares the hardware power domain state before entering
3831  * system suspend.
3832  *
3833  * It must be called with power domains already disabled (after a call to
3834  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
3835  */
3836 void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
3837 				 enum i915_drm_suspend_mode suspend_mode)
3838 {
3839 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3840 
3841 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3842 
3843 	/*
3844 	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
3845 	 * support don't manually deinit the power domains. This also means the
3846 	 * CSR/DMC firmware will stay active, it will power down any HW
3847 	 * resources as required and also enable deeper system power states
3848 	 * that would be blocked if the firmware was inactive.
3849 	 */
3850 	if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
3851 	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
3852 	    dev_priv->csr.dmc_payload != NULL) {
3853 		intel_power_domains_verify_state(dev_priv);
3854 		return;
3855 	}
3856 
3857 	/*
3858 	 * Even if power well support was disabled we still want to disable
3859 	 * power wells if power domains must be deinitialized for suspend.
3860 	 */
3861 	if (!i915_modparams.disable_power_well) {
3862 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3863 		intel_power_domains_verify_state(dev_priv);
3864 	}
3865 
3866 	if (IS_ICELAKE(dev_priv))
3867 		icl_display_core_uninit(dev_priv);
3868 	else if (IS_CANNONLAKE(dev_priv))
3869 		cnl_display_core_uninit(dev_priv);
3870 	else if (IS_GEN9_BC(dev_priv))
3871 		skl_display_core_uninit(dev_priv);
3872 	else if (IS_GEN9_LP(dev_priv))
3873 		bxt_display_core_uninit(dev_priv);
3874 
3875 	power_domains->display_core_suspended = true;
3876 }
3877 
3878 /**
3879  * intel_power_domains_resume - resume power domain state
3880  * @dev_priv: i915 device instance
3881  *
3882  * This function resume the hardware power domain state during system resume.
3883  *
3884  * It will return with power domain support disabled (to be enabled later by
3885  * intel_power_domains_enable()) and must be paired with
3886  * intel_power_domains_suspend().
3887  */
3888 void intel_power_domains_resume(struct drm_i915_private *dev_priv)
3889 {
3890 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3891 
3892 	if (power_domains->display_core_suspended) {
3893 		intel_power_domains_init_hw(dev_priv, true);
3894 		power_domains->display_core_suspended = false;
3895 	} else {
3896 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3897 	}
3898 
3899 	intel_power_domains_verify_state(dev_priv);
3900 }
3901 
3902 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
3903 
/*
 * Debug helper: log every power well's refcount followed by the use count of
 * each power domain the well serves. Called (once) from
 * intel_power_domains_verify_state() when a refcount mismatch is detected.
 */
static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well) {
		enum intel_display_power_domain domain;

		/* Well name and its own reference count. */
		DRM_DEBUG_DRIVER("%-25s %d\n",
				 power_well->desc->name, power_well->count);

		/* Per-domain use counts for the domains this well powers. */
		for_each_power_domain(domain, power_well->desc->domains)
			DRM_DEBUG_DRIVER("  %-23s %d\n",
					 intel_display_power_domain_str(domain),
					 power_domains->domain_use_count[domain]);
	}
}
3921 
3922 /**
3923  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
3924  * @dev_priv: i915 device instance
3925  *
3926  * Verify if the reference count of each power well matches its HW enabled
3927  * state and the total refcount of the domains it belongs to. This must be
3928  * called after modeset HW state sanitization, which is responsible for
3929  * acquiring reference counts for any power wells in use and disabling the
3930  * ones left on by BIOS but not required by any active output.
3931  */
3932 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
3933 {
3934 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3935 	struct i915_power_well *power_well;
3936 	bool dump_domain_info;
3937 
3938 	mutex_lock(&power_domains->lock);
3939 
3940 	dump_domain_info = false;
3941 	for_each_power_well(dev_priv, power_well) {
3942 		enum intel_display_power_domain domain;
3943 		int domains_count;
3944 		bool enabled;
3945 
3946 		enabled = power_well->desc->ops->is_enabled(dev_priv,
3947 							    power_well);
3948 		if ((power_well->count || power_well->desc->always_on) !=
3949 		    enabled)
3950 			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
3951 				  power_well->desc->name,
3952 				  power_well->count, enabled);
3953 
3954 		domains_count = 0;
3955 		for_each_power_domain(domain, power_well->desc->domains)
3956 			domains_count += power_domains->domain_use_count[domain];
3957 
3958 		if (power_well->count != domains_count) {
3959 			DRM_ERROR("power well %s refcount/domain refcount mismatch "
3960 				  "(refcount %d/domains refcount %d)\n",
3961 				  power_well->desc->name, power_well->count,
3962 				  domains_count);
3963 			dump_domain_info = true;
3964 		}
3965 	}
3966 
3967 	if (dump_domain_info) {
3968 		static bool dumped;
3969 
3970 		if (!dumped) {
3971 			intel_power_domains_dump_info(dev_priv);
3972 			dumped = true;
3973 		}
3974 	}
3975 
3976 	mutex_unlock(&power_domains->lock);
3977 }
3978 
3979 #else
3980 
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
{
}
3984 
3985 #endif
3986 
3987 /**
3988  * intel_runtime_pm_get - grab a runtime pm reference
3989  * @dev_priv: i915 device instance
3990  *
3991  * This function grabs a device-level runtime pm reference (mostly used for GEM
3992  * code to ensure the GTT or GT is on) and ensures that it is powered up.
3993  *
3994  * Any runtime pm reference obtained by this function must have a symmetric
3995  * call to intel_runtime_pm_put() to release the reference again.
3996  */
3997 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
3998 {
3999 	struct pci_dev *pdev = dev_priv->drm.pdev;
4000 	struct device *kdev = &pdev->dev;
4001 	int ret;
4002 
4003 	ret = pm_runtime_get_sync(kdev);
4004 	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4005 
4006 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
4007 	assert_rpm_wakelock_held(dev_priv);
4008 }
4009 
4010 /**
4011  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
4012  * @dev_priv: i915 device instance
4013  *
4014  * This function grabs a device-level runtime pm reference if the device is
4015  * already in use and ensures that it is powered up. It is illegal to try
4016  * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
4017  *
4018  * Any runtime pm reference obtained by this function must have a symmetric
4019  * call to intel_runtime_pm_put() to release the reference again.
4020  *
4021  * Returns: True if the wakeref was acquired, or False otherwise.
4022  */
4023 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
4024 {
4025 	if (IS_ENABLED(CONFIG_PM)) {
4026 		struct pci_dev *pdev = dev_priv->drm.pdev;
4027 		struct device *kdev = &pdev->dev;
4028 
4029 		/*
4030 		 * In cases runtime PM is disabled by the RPM core and we get
4031 		 * an -EINVAL return value we are not supposed to call this
4032 		 * function, since the power state is undefined. This applies
4033 		 * atm to the late/early system suspend/resume handlers.
4034 		 */
4035 		if (pm_runtime_get_if_in_use(kdev) <= 0)
4036 			return false;
4037 	}
4038 
4039 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
4040 	assert_rpm_wakelock_held(dev_priv);
4041 
4042 	return true;
4043 }
4044 
4045 /**
4046  * intel_runtime_pm_get_noresume - grab a runtime pm reference
4047  * @dev_priv: i915 device instance
4048  *
4049  * This function grabs a device-level runtime pm reference (mostly used for GEM
4050  * code to ensure the GTT or GT is on).
4051  *
4052  * It will _not_ power up the device but instead only check that it's powered
4053  * on.  Therefore it is only valid to call this functions from contexts where
4054  * the device is known to be powered up and where trying to power it up would
4055  * result in hilarity and deadlocks. That pretty much means only the system
4056  * suspend/resume code where this is used to grab runtime pm references for
4057  * delayed setup down in work items.
4058  *
4059  * Any runtime pm reference obtained by this function must have a symmetric
4060  * call to intel_runtime_pm_put() to release the reference again.
4061  */
4062 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
4063 {
4064 	struct pci_dev *pdev = dev_priv->drm.pdev;
4065 	struct device *kdev = &pdev->dev;
4066 
4067 	assert_rpm_wakelock_held(dev_priv);
4068 	pm_runtime_get_noresume(kdev);
4069 
4070 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
4071 }
4072 
4073 /**
4074  * intel_runtime_pm_put - release a runtime pm reference
4075  * @dev_priv: i915 device instance
4076  *
4077  * This function drops the device-level runtime pm reference obtained by
4078  * intel_runtime_pm_get() and might power down the corresponding
4079  * hardware block right away if this is the last reference.
4080  */
4081 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
4082 {
4083 	struct pci_dev *pdev = dev_priv->drm.pdev;
4084 	struct device *kdev = &pdev->dev;
4085 
4086 	assert_rpm_wakelock_held(dev_priv);
4087 	atomic_dec(&dev_priv->runtime_pm.wakeref_count);
4088 
4089 	pm_runtime_mark_last_busy(kdev);
4090 	pm_runtime_put_autosuspend(kdev);
4091 }
4092 
4093 /**
4094  * intel_runtime_pm_enable - enable runtime pm
4095  * @dev_priv: i915 device instance
4096  *
4097  * This function enables runtime pm at the end of the driver load sequence.
4098  *
4099  * Note that this function does currently not enable runtime pm for the
4100  * subordinate display power domains. That is done by
4101  * intel_power_domains_enable().
4102  */
4103 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
4104 {
4105 	struct pci_dev *pdev = dev_priv->drm.pdev;
4106 	struct device *kdev = &pdev->dev;
4107 
4108 	/*
4109 	 * Disable the system suspend direct complete optimization, which can
4110 	 * leave the device suspended skipping the driver's suspend handlers
4111 	 * if the device was already runtime suspended. This is needed due to
4112 	 * the difference in our runtime and system suspend sequence and
4113 	 * becaue the HDA driver may require us to enable the audio power
4114 	 * domain during system suspend.
4115 	 */
4116 	dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
4117 
4118 	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
4119 	pm_runtime_mark_last_busy(kdev);
4120 
4121 	/*
4122 	 * Take a permanent reference to disable the RPM functionality and drop
4123 	 * it only when unloading the driver. Use the low level get/put helpers,
4124 	 * so the driver's own RPM reference tracking asserts also work on
4125 	 * platforms without RPM support.
4126 	 */
4127 	if (!HAS_RUNTIME_PM(dev_priv)) {
4128 		int ret;
4129 
4130 		pm_runtime_dont_use_autosuspend(kdev);
4131 		ret = pm_runtime_get_sync(kdev);
4132 		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4133 	} else {
4134 		pm_runtime_use_autosuspend(kdev);
4135 	}
4136 
4137 	/*
4138 	 * The core calls the driver load handler with an RPM reference held.
4139 	 * We drop that here and will reacquire it during unloading in
4140 	 * intel_power_domains_fini().
4141 	 */
4142 	pm_runtime_put_autosuspend(kdev);
4143 }
4144 
4145 void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
4146 {
4147 	struct pci_dev *pdev = dev_priv->drm.pdev;
4148 	struct device *kdev = &pdev->dev;
4149 
4150 	/* Transfer rpm ownership back to core */
4151 	WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0,
4152 	     "Failed to pass rpm ownership back to core\n");
4153 
4154 	pm_runtime_dont_use_autosuspend(kdev);
4155 
4156 	if (!HAS_RUNTIME_PM(dev_priv))
4157 		pm_runtime_put(kdev);
4158 }
4159