1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28 
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31 
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34 
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * But even there some manual control at the device level is required.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
51 
52 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
53 				    int power_well_id);
54 
55 static struct i915_power_well *
56 lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);
57 
58 const char *
59 intel_display_power_domain_str(enum intel_display_power_domain domain)
60 {
61 	switch (domain) {
62 	case POWER_DOMAIN_PIPE_A:
63 		return "PIPE_A";
64 	case POWER_DOMAIN_PIPE_B:
65 		return "PIPE_B";
66 	case POWER_DOMAIN_PIPE_C:
67 		return "PIPE_C";
68 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
69 		return "PIPE_A_PANEL_FITTER";
70 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
71 		return "PIPE_B_PANEL_FITTER";
72 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
73 		return "PIPE_C_PANEL_FITTER";
74 	case POWER_DOMAIN_TRANSCODER_A:
75 		return "TRANSCODER_A";
76 	case POWER_DOMAIN_TRANSCODER_B:
77 		return "TRANSCODER_B";
78 	case POWER_DOMAIN_TRANSCODER_C:
79 		return "TRANSCODER_C";
80 	case POWER_DOMAIN_TRANSCODER_EDP:
81 		return "TRANSCODER_EDP";
82 	case POWER_DOMAIN_TRANSCODER_DSI_A:
83 		return "TRANSCODER_DSI_A";
84 	case POWER_DOMAIN_TRANSCODER_DSI_C:
85 		return "TRANSCODER_DSI_C";
86 	case POWER_DOMAIN_PORT_DDI_A_LANES:
87 		return "PORT_DDI_A_LANES";
88 	case POWER_DOMAIN_PORT_DDI_B_LANES:
89 		return "PORT_DDI_B_LANES";
90 	case POWER_DOMAIN_PORT_DDI_C_LANES:
91 		return "PORT_DDI_C_LANES";
92 	case POWER_DOMAIN_PORT_DDI_D_LANES:
93 		return "PORT_DDI_D_LANES";
94 	case POWER_DOMAIN_PORT_DDI_E_LANES:
95 		return "PORT_DDI_E_LANES";
96 	case POWER_DOMAIN_PORT_DDI_A_IO:
97 		return "PORT_DDI_A_IO";
98 	case POWER_DOMAIN_PORT_DDI_B_IO:
99 		return "PORT_DDI_B_IO";
100 	case POWER_DOMAIN_PORT_DDI_C_IO:
101 		return "PORT_DDI_C_IO";
102 	case POWER_DOMAIN_PORT_DDI_D_IO:
103 		return "PORT_DDI_D_IO";
104 	case POWER_DOMAIN_PORT_DDI_E_IO:
105 		return "PORT_DDI_E_IO";
106 	case POWER_DOMAIN_PORT_DSI:
107 		return "PORT_DSI";
108 	case POWER_DOMAIN_PORT_CRT:
109 		return "PORT_CRT";
110 	case POWER_DOMAIN_PORT_OTHER:
111 		return "PORT_OTHER";
112 	case POWER_DOMAIN_VGA:
113 		return "VGA";
114 	case POWER_DOMAIN_AUDIO:
115 		return "AUDIO";
116 	case POWER_DOMAIN_PLLS:
117 		return "PLLS";
118 	case POWER_DOMAIN_AUX_A:
119 		return "AUX_A";
120 	case POWER_DOMAIN_AUX_B:
121 		return "AUX_B";
122 	case POWER_DOMAIN_AUX_C:
123 		return "AUX_C";
124 	case POWER_DOMAIN_AUX_D:
125 		return "AUX_D";
126 	case POWER_DOMAIN_GMBUS:
127 		return "GMBUS";
128 	case POWER_DOMAIN_INIT:
129 		return "INIT";
130 	case POWER_DOMAIN_MODESET:
131 		return "MODESET";
132 	default:
133 		MISSING_CASE(domain);
134 		return "?";
135 	}
136 }
137 
/*
 * Enable a power well via its platform hook and record the new state
 * in the software tracking (hw_enabled).
 */
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}
145 
/*
 * Disable a power well via its platform hook.  hw_enabled is cleared
 * before calling the hook (the mirror of intel_power_well_enable()).
 */
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}
153 
154 static void intel_power_well_get(struct drm_i915_private *dev_priv,
155 				 struct i915_power_well *power_well)
156 {
157 	if (!power_well->count++)
158 		intel_power_well_enable(dev_priv, power_well);
159 }
160 
161 static void intel_power_well_put(struct drm_i915_private *dev_priv,
162 				 struct i915_power_well *power_well)
163 {
164 	WARN(!power_well->count, "Use count on power well %s is already zero",
165 	     power_well->name);
166 
167 	if (!--power_well->count)
168 		intel_power_well_disable(dev_priv, power_well);
169 }
170 
171 /*
172  * We should only use the power well if we explicitly asked the hardware to
173  * enable it, so check if it's enabled and also check if we've requested it to
174  * be enabled.
175  */
176 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
177 				   struct i915_power_well *power_well)
178 {
179 	return I915_READ(HSW_PWR_WELL_DRIVER) ==
180 		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
181 }
182 
183 /**
184  * __intel_display_power_is_enabled - unlocked check for a power domain
185  * @dev_priv: i915 device instance
186  * @domain: power domain to check
187  *
188  * This is the unlocked version of intel_display_power_is_enabled() and should
189  * only be used from error capture and recovery code where deadlocks are
190  * possible.
191  *
192  * Returns:
193  * True when the power domain is enabled, false otherwise.
194  */
195 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
196 				      enum intel_display_power_domain domain)
197 {
198 	struct i915_power_well *power_well;
199 	bool is_enabled;
200 
201 	if (dev_priv->pm.suspended)
202 		return false;
203 
204 	is_enabled = true;
205 
206 	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
207 		if (power_well->always_on)
208 			continue;
209 
210 		if (!power_well->hw_enabled) {
211 			is_enabled = false;
212 			break;
213 		}
214 	}
215 
216 	return is_enabled;
217 }
218 
219 /**
220  * intel_display_power_is_enabled - check for a power domain
221  * @dev_priv: i915 device instance
222  * @domain: power domain to check
223  *
224  * This function can be used to check the hw power domain state. It is mostly
225  * used in hardware state readout functions. Everywhere else code should rely
226  * upon explicit power domain reference counting to ensure that the hardware
227  * block is powered up before accessing it.
228  *
229  * Callers must hold the relevant modesetting locks to ensure that concurrent
230  * threads can't disable the power well while the caller tries to read a few
231  * registers.
232  *
233  * Returns:
234  * True when the power domain is enabled, false otherwise.
235  */
236 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
237 				    enum intel_display_power_domain domain)
238 {
239 	struct i915_power_domains *power_domains;
240 	bool ret;
241 
242 	power_domains = &dev_priv->power_domains;
243 
244 	mutex_lock(&power_domains->lock);
245 	ret = __intel_display_power_is_enabled(dev_priv, domain);
246 	mutex_unlock(&power_domains->lock);
247 
248 	return ret;
249 }
250 
251 /**
252  * intel_display_set_init_power - set the initial power domain state
253  * @dev_priv: i915 device instance
254  * @enable: whether to enable or disable the initial power domain state
255  *
256  * For simplicity our driver load/unload and system suspend/resume code assumes
257  * that all power domains are always enabled. This functions controls the state
258  * of this little hack. While the initial power domain state is enabled runtime
259  * pm is effectively disabled.
260  */
261 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
262 				  bool enable)
263 {
264 	if (dev_priv->power_domains.init_power_on == enable)
265 		return;
266 
267 	if (enable)
268 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
269 	else
270 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
271 
272 	dev_priv->power_domains.init_power_on = enable;
273 }
274 
275 /*
276  * Starting with Haswell, we have a "Power Down Well" that can be turned off
277  * when not needed anymore. We have 4 registers that can request the power well
278  * to be enabled, and it will only be disabled if none of the registers is
279  * requesting it to be enabled.
280  */
/*
 * Fixups needed right after enabling the HSW/BDW power well: poke the
 * VGA MSR register to silence unclaimed register interrupts, and on
 * BDW re-initialize the interrupt state for the pipes the well feeds.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
303 
/*
 * Counterpart of hsw_power_well_post_enable(): quiesce the interrupt
 * state for the affected pipes on BDW before the well goes down.
 */
static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
310 
311 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
312 				       struct i915_power_well *power_well)
313 {
314 	struct pci_dev *pdev = dev_priv->drm.pdev;
315 
316 	/*
317 	 * After we re-enable the power well, if we touch VGA register 0x3d5
318 	 * we'll get unclaimed register interrupts. This stops after we write
319 	 * anything to the VGA MSR register. The vgacon module uses this
320 	 * register all the time, so if we unbind our driver and, as a
321 	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
322 	 * console_unlock(). So make here we touch the VGA MSR register, making
323 	 * sure vgacon can keep working normally without triggering interrupts
324 	 * and error messages.
325 	 */
326 	if (power_well->id == SKL_DISP_PW_2) {
327 		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
328 		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
329 		vga_put(pdev, VGA_RSRC_LEGACY_IO);
330 
331 		gen8_irq_power_well_post_enable(dev_priv,
332 						1 << PIPE_C | 1 << PIPE_B);
333 	}
334 }
335 
336 static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
337 				       struct i915_power_well *power_well)
338 {
339 	if (power_well->id == SKL_DISP_PW_2)
340 		gen8_irq_power_well_pre_disable(dev_priv,
341 						1 << PIPE_C | 1 << PIPE_B);
342 }
343 
344 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
345 			       struct i915_power_well *power_well, bool enable)
346 {
347 	bool is_enabled, enable_requested;
348 	uint32_t tmp;
349 
350 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
351 	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
352 	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
353 
354 	if (enable) {
355 		if (!enable_requested)
356 			I915_WRITE(HSW_PWR_WELL_DRIVER,
357 				   HSW_PWR_WELL_ENABLE_REQUEST);
358 
359 		if (!is_enabled) {
360 			DRM_DEBUG_KMS("Enabling power well\n");
361 			if (intel_wait_for_register(dev_priv,
362 						    HSW_PWR_WELL_DRIVER,
363 						    HSW_PWR_WELL_STATE_ENABLED,
364 						    HSW_PWR_WELL_STATE_ENABLED,
365 						    20))
366 				DRM_ERROR("Timeout enabling power well\n");
367 			hsw_power_well_post_enable(dev_priv);
368 		}
369 
370 	} else {
371 		if (enable_requested) {
372 			hsw_power_well_pre_disable(dev_priv);
373 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
374 			POSTING_READ(HSW_PWR_WELL_DRIVER);
375 			DRM_DEBUG_KMS("Requesting to disable the power well\n");
376 		}
377 	}
378 }
379 
/* SKL: domains serviced by power well 2 (PG2). */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
/* SKL: per-port DDI IO wells; DDI A and E share a single well. */
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* SKL: domains whose use keeps the DC states (DC5/DC6) off. */
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* BXT: domains serviced by power well 2. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* BXT: domains whose use keeps the DC states off. */
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* BXT: DPIO common (PHY) wells. */
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* GLK: domains serviced by power well 2. */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
/* GLK: per-port DDI IO and DPIO common wells. */
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* GLK: per-port AUX wells. */
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* GLK: domains whose use keeps the DC states off. */
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* CNL: domains serviced by power well 2. */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
/* CNL: per-port DDI IO wells; DDI A shares its well with DDI E. */
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* CNL: per-port AUX wells. */
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* CNL: domains whose use keeps the DC states off. */
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
545 
/*
 * Sanity-check (via WARN_ONCE, non-fatal) that the preconditions for
 * entering DC9 hold: DC9 not already requested, DC5 disabled, the
 * driver power well request cleared and interrupts off.
 */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
564 
/*
 * Sanity-check (via WARN_ONCE, non-fatal) that we are in a state where
 * leaving DC9 makes sense: interrupts still off and DC5 not enabled.
 */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
580 
/*
 * Write @state to DC_STATE_EN and keep rewriting until it sticks (the
 * DMC firmware has been observed to revert the register), bailing out
 * after 100 rewrites or once 6 consecutive reads return @state.
 */
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;	/* total number of extra writes issued */
	int rereads = 0;	/* consecutive reads matching @state */
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do  {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			/* Mismatch: rewrite and restart the stability count. */
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}
617 
618 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
619 {
620 	u32 mask;
621 
622 	mask = DC_STATE_EN_UPTO_DC5;
623 	if (IS_GEN9_LP(dev_priv))
624 		mask |= DC_STATE_EN_DC9;
625 	else
626 		mask |= DC_STATE_EN_UPTO_DC6;
627 
628 	return mask;
629 }
630 
631 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
632 {
633 	u32 val;
634 
635 	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
636 
637 	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
638 		      dev_priv->csr.dc_state, val);
639 	dev_priv->csr.dc_state = val;
640 }
641 
/*
 * Program the requested DC @state into DC_STATE_EN, clamping it to the
 * platform's allowed mask, and update the software tracking.  Complains
 * if the DMC firmware changed the state behind our back.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	/* Never request states the platform doesn't allow. */
	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}
667 
/*
 * Enter DC9: reset the power sequencer state first, then request the
 * DC9 state from the hardware.
 */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
677 
/*
 * Leave DC9: disable all DC states, then reapply the PPS register
 * unlock workaround that DC9 entry may have clobbered.
 */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
688 
/*
 * Warn (once) if the CSR/DMC firmware registers look unprogrammed —
 * DC5/DC6 require the firmware to be loaded.
 */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
696 
/*
 * Sanity-check (via WARN_ONCE, non-fatal) the preconditions for DC5:
 * power well 2 down, DC5 not already requested, an RPM wakelock held
 * and the CSR firmware loaded.
 */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}
710 
/* Request the DC5 state after checking its preconditions. */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
719 
/*
 * Sanity-check (via WARN_ONCE, non-fatal) the preconditions for DC6:
 * backlight utility pin off, DC6 not already requested and the CSR
 * firmware loaded.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
729 
/* Request the DC6 state after checking its preconditions. */
void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

}
739 
/* Disable all DC states (leaves DC6/DC5). */
void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
746 
/*
 * Clear stale request bits for @power_well in the KVMR, BIOS and DEBUG
 * request registers so that only the driver's request controls the
 * well.  Requests known to be forced on by the DMC firmware are only
 * logged at debug level; anything else triggers a WARN.
 */
static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->id;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	/* KVMR requests are never expected; clear with a warning. */
	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    (IS_GEN9_BC(dev_priv) &&
	     power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}
785 
786 static void skl_set_power_well(struct drm_i915_private *dev_priv,
787 			       struct i915_power_well *power_well, bool enable)
788 {
789 	uint32_t tmp, fuse_status;
790 	uint32_t req_mask, state_mask;
791 	bool is_enabled, enable_requested, check_fuse_status = false;
792 
793 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
794 	fuse_status = I915_READ(SKL_FUSE_STATUS);
795 
796 	switch (power_well->id) {
797 	case SKL_DISP_PW_1:
798 		if (intel_wait_for_register(dev_priv,
799 					    SKL_FUSE_STATUS,
800 					    SKL_FUSE_PG0_DIST_STATUS,
801 					    SKL_FUSE_PG0_DIST_STATUS,
802 					    1)) {
803 			DRM_ERROR("PG0 not enabled\n");
804 			return;
805 		}
806 		break;
807 	case SKL_DISP_PW_2:
808 		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
809 			DRM_ERROR("PG1 in disabled state\n");
810 			return;
811 		}
812 		break;
813 	case SKL_DISP_PW_MISC_IO:
814 	case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A, CNL_DISP_PW_DDI_A */
815 	case SKL_DISP_PW_DDI_B:
816 	case SKL_DISP_PW_DDI_C:
817 	case SKL_DISP_PW_DDI_D:
818 	case GLK_DISP_PW_AUX_A: /* CNL_DISP_PW_AUX_A */
819 	case GLK_DISP_PW_AUX_B: /* CNL_DISP_PW_AUX_B */
820 	case GLK_DISP_PW_AUX_C: /* CNL_DISP_PW_AUX_C */
821 	case CNL_DISP_PW_AUX_D:
822 		break;
823 	default:
824 		WARN(1, "Unknown power well %lu\n", power_well->id);
825 		return;
826 	}
827 
828 	req_mask = SKL_POWER_WELL_REQ(power_well->id);
829 	enable_requested = tmp & req_mask;
830 	state_mask = SKL_POWER_WELL_STATE(power_well->id);
831 	is_enabled = tmp & state_mask;
832 
833 	if (!enable && enable_requested)
834 		skl_power_well_pre_disable(dev_priv, power_well);
835 
836 	if (enable) {
837 		if (!enable_requested) {
838 			WARN((tmp & state_mask) &&
839 				!I915_READ(HSW_PWR_WELL_BIOS),
840 				"Invalid for power well status to be enabled, unless done by the BIOS, \
841 				when request is to disable!\n");
842 			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
843 		}
844 
845 		if (!is_enabled) {
846 			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
847 			check_fuse_status = true;
848 		}
849 	} else {
850 		if (enable_requested) {
851 			I915_WRITE(HSW_PWR_WELL_DRIVER,	tmp & ~req_mask);
852 			POSTING_READ(HSW_PWR_WELL_DRIVER);
853 			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
854 		}
855 
856 		gen9_sanitize_power_well_requests(dev_priv, power_well);
857 	}
858 
859 	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
860 		     1))
861 		DRM_ERROR("%s %s timeout\n",
862 			  power_well->name, enable ? "enable" : "disable");
863 
864 	if (check_fuse_status) {
865 		if (power_well->id == SKL_DISP_PW_1) {
866 			if (intel_wait_for_register(dev_priv,
867 						    SKL_FUSE_STATUS,
868 						    SKL_FUSE_PG1_DIST_STATUS,
869 						    SKL_FUSE_PG1_DIST_STATUS,
870 						    1))
871 				DRM_ERROR("PG1 distributing status timeout\n");
872 		} else if (power_well->id == SKL_DISP_PW_2) {
873 			if (intel_wait_for_register(dev_priv,
874 						    SKL_FUSE_STATUS,
875 						    SKL_FUSE_PG2_DIST_STATUS,
876 						    SKL_FUSE_PG2_DIST_STATUS,
877 						    1))
878 				DRM_ERROR("PG2 distributing status timeout\n");
879 		}
880 	}
881 
882 	if (enable && !is_enabled)
883 		skl_power_well_post_enable(dev_priv, power_well);
884 }
885 
886 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
887 				   struct i915_power_well *power_well)
888 {
889 	/* Take over the request bit if set by BIOS. */
890 	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) {
891 		if (!(I915_READ(HSW_PWR_WELL_DRIVER) &
892 		      HSW_PWR_WELL_ENABLE_REQUEST))
893 			I915_WRITE(HSW_PWR_WELL_DRIVER,
894 				   HSW_PWR_WELL_ENABLE_REQUEST);
895 		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
896 	}
897 }
898 
/* Power well ops enable hook: thin wrapper around hsw_set_power_well(). */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}
904 
/* Power well ops disable hook: thin wrapper around hsw_set_power_well(). */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}
910 
911 static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
912 					struct i915_power_well *power_well)
913 {
914 	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
915 		SKL_POWER_WELL_STATE(power_well->id);
916 
917 	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
918 }
919 
/*
 * Sync the SW state with a well that the BIOS may have left enabled:
 * mirror any BIOS request into the driver request register, then drop
 * the BIOS request.
 */
static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id);
	uint32_t bios_req = I915_READ(HSW_PWR_WELL_BIOS);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		uint32_t drv_req = I915_READ(HSW_PWR_WELL_DRIVER);

		/*
		 * Driver request is set before the BIOS request is
		 * cleared, so the well never loses its last request.
		 */
		if (!(drv_req & mask))
			I915_WRITE(HSW_PWR_WELL_DRIVER, drv_req | mask);
		I915_WRITE(HSW_PWR_WELL_BIOS, bios_req & ~mask);
	}
}
935 
/* Power well "enable" hook: delegate to the common SKL well toggler. */
static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}
941 
/* Power well "disable" hook: delegate to the common SKL well toggler. */
static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}
947 
/* Enable a BXT DPIO common lane well by initializing the DDI PHY it backs. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->data);
}
953 
/* Disable a BXT DPIO common lane well by uninitializing its DDI PHY. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->data);
}
959 
/* Report whether the DDI PHY behind this well is currently enabled. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}
965 
966 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
967 {
968 	struct i915_power_well *power_well;
969 
970 	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
971 	if (power_well->count > 0)
972 		bxt_ddi_phy_verify_state(dev_priv, power_well->data);
973 
974 	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
975 	if (power_well->count > 0)
976 		bxt_ddi_phy_verify_state(dev_priv, power_well->data);
977 
978 	if (IS_GEMINILAKE(dev_priv)) {
979 		power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
980 		if (power_well->count > 0)
981 			bxt_ddi_phy_verify_state(dev_priv, power_well->data);
982 	}
983 }
984 
985 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
986 					   struct i915_power_well *power_well)
987 {
988 	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
989 }
990 
991 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
992 {
993 	u32 tmp = I915_READ(DBUF_CTL);
994 
995 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
996 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
997 	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
998 }
999 
/*
 * Enable the "DC off" well: block DC5/DC6 entry and sanity-check the
 * state that should have survived (or been restored after) a DC state.
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* The current cdclk config must still match our cached HW state. */
	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	WARN_ON(!intel_cdclk_state_compare(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	/* On BXT/GLK also cross-check the DPIO PHY power state. */
	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}
1015 
/*
 * Disable the "DC off" well: re-allow the deepest DC state the platform
 * permits. Requires the DMC firmware payload to be loaded.
 */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	/* Prefer DC6 when allowed, otherwise fall back to DC5. */
	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}
1027 
/* No-op sync_hw hook for wells with no HW state to re-sync. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
1032 
/* No-op enable/disable hook for always-on wells. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
1037 
/* Always-on wells unconditionally report themselves as enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
1043 
/* Turn on pipes A and B, skipping any pipe that is already running. */
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}
1052 
/* Turn off both pipes, in the reverse order they were enabled. */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
1059 
1060 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1061 					  struct i915_power_well *power_well)
1062 {
1063 	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1064 		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1065 }
1066 
1067 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1068 					  struct i915_power_well *power_well)
1069 {
1070 	if (power_well->count > 0)
1071 		i830_pipes_power_well_enable(dev_priv, power_well);
1072 	else
1073 		i830_pipes_power_well_disable(dev_priv, power_well);
1074 }
1075 
/*
 * Toggle a VLV power well via the punit power gating registers, holding
 * the rps hw_lock around the punit accesses. Skips the write if the
 * well is already in the requested state, otherwise writes the new
 * state and waits (up to 100ms) for the status to reflect it.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

/* True once the punit status reports the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	/* Read-modify-write only this well's bits in the control reg. */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
1111 
/* Power well "enable" hook: delegate to the common VLV well toggler. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
1117 
/* Power well "disable" hook: delegate to the common VLV well toggler. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
1123 
/*
 * Query a VLV well's state from the punit, warning if the status or
 * control registers show anything other than the two states we set.
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
1159 
/*
 * Program the baseline VLV/CHV display clock gating, watermark arbiter
 * and rawclk frequency settings for the display power well.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;	/* keep only this bit */
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	/* rawclk_freq is cached in kHz; the register wants MHz. */
	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
1186 
/*
 * Common init after powering up the VLV/CHV display well: reference
 * clocks, clock gating, display IRQs, and (outside of driver init)
 * restoring hotplug/CRT/VGA/PPS state that lives in the well.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
1235 
/*
 * Tear-down before powering down the VLV/CHV display well: quiesce
 * display IRQs, reset the panel power sequencer state and re-arm HPD
 * polling (the HPD interrupts die with the well).
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
1251 
/* Enable the VLV display well, then restore the state living in it. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1261 
/* Quiesce the state living in the VLV display well, then power it down. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1271 
/*
 * Power up the VLV DPIO common lane well and de-assert the PHY common
 * lane reset afterwards.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1295 
/*
 * Assert the PHY common lane reset and power down the VLV DPIO common
 * lane well. All PLLs must already be disabled.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1311 
/* Bitmask covering every defined power domain. */
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1313 
1314 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1315 						 int power_well_id)
1316 {
1317 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1318 	int i;
1319 
1320 	for (i = 0; i < power_domains->power_well_count; i++) {
1321 		struct i915_power_well *power_well;
1322 
1323 		power_well = &power_domains->power_wells[i];
1324 		if (power_well->id == power_well_id)
1325 			return power_well;
1326 	}
1327 
1328 	return NULL;
1329 }
1330 
/* True iff all of @bits are set in @val. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

/*
 * Compute the DISPLAY_PHY_STATUS value we expect from the current
 * power well state and chv_phy_control, then wait for the HW to match
 * it, complaining if it never does.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			   phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET
1440 
/*
 * Power up a CHV DPIO common lane well (PHY0 = ports B/C, PHY1 = port D),
 * wait for phypwrgood, configure dynamic power down and de-assert the
 * common lane reset for the PHY.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	/* The pipe here selects which sideband block the DPIO access targets. */
	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/* De-assert the common lane reset and latch it into the HW. */
	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1504 
/*
 * Assert the common lane reset for a CHV PHY and power down its DPIO
 * common lane well. The PLLs feeding the PHY must already be off.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1535 
/*
 * Verify that the per-lane power down status in the PHY's common lane
 * register matches what the current override settings (@override,
 * @mask) predict, and WARN on a mismatch.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* Some but not all lanes overridden on: any-lane bit only. */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* Extract the all/any power down bits for this channel. */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1597 
/**
 * chv_phy_powergate_ch - toggle the power down override for a PHY channel
 * @dev_priv: i915 device instance
 * @phy: DPIO PHY to operate on
 * @ch: channel within @phy
 * @override: whether the power down override should be enabled
 *
 * Updates the cached chv_phy_control and the DISPLAY_PHY_CONTROL
 * register (under the power domains lock), then cross-checks the PHY
 * status.
 *
 * Returns: the previous override state, so callers can restore it.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1628 
/**
 * chv_phy_powergate_lanes - select which PHY lanes stay powered via override
 * @encoder: encoder whose PHY/channel is affected
 * @override: whether the power down override should be enabled
 * @mask: lane mask to keep powered when the override is active
 *
 * Rewrites this channel's lane override bits and the override enable in
 * chv_phy_control, latches the result into DISPLAY_PHY_CONTROL and
 * cross-checks both the PHY status and the lane power down state.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* Replace the previous lane mask for this channel with @mask. */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1658 
/*
 * Query a CHV pipe well's state from the punit, warning if the status
 * or control fields show anything other than the two states we set.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->id;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
1687 
/*
 * Toggle a CHV per-pipe power well via the punit DSPFREQ register,
 * holding the rps hw_lock. Skips the write if the pipe is already in
 * the requested state, otherwise writes the control field and waits
 * (up to 100ms) for the status field to reflect it.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->id;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

/* True once the punit status field reports the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
1721 
/* Enable the CHV pipe-A well, then restore the display state in it. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1731 
/* Quiesce the display state in the CHV pipe-A well, then power it down. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1741 
/*
 * Grab a reference on every power well backing @domain and bump the
 * domain's use count. Caller must hold power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
1754 
1755 /**
1756  * intel_display_power_get - grab a power domain reference
1757  * @dev_priv: i915 device instance
1758  * @domain: power domain to reference
1759  *
1760  * This function grabs a power domain reference for @domain and ensures that the
1761  * power domain and all its parents are powered up. Therefore users should only
1762  * grab a reference to the innermost power domain they need.
1763  *
1764  * Any power domain reference obtained by this function must have a symmetric
1765  * call to intel_display_power_put() to release the reference again.
1766  */
1767 void intel_display_power_get(struct drm_i915_private *dev_priv,
1768 			     enum intel_display_power_domain domain)
1769 {
1770 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1771 
1772 	intel_runtime_pm_get(dev_priv);
1773 
1774 	mutex_lock(&power_domains->lock);
1775 
1776 	__intel_display_power_get_domain(dev_priv, domain);
1777 
1778 	mutex_unlock(&power_domains->lock);
1779 }
1780 
1781 /**
1782  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1783  * @dev_priv: i915 device instance
1784  * @domain: power domain to reference
1785  *
1786  * This function grabs a power domain reference for @domain and ensures that the
1787  * power domain and all its parents are powered up. Therefore users should only
1788  * grab a reference to the innermost power domain they need.
1789  *
1790  * Any power domain reference obtained by this function must have a symmetric
1791  * call to intel_display_power_put() to release the reference again.
1792  */
1793 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1794 					enum intel_display_power_domain domain)
1795 {
1796 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1797 	bool is_enabled;
1798 
1799 	if (!intel_runtime_pm_get_if_in_use(dev_priv))
1800 		return false;
1801 
1802 	mutex_lock(&power_domains->lock);
1803 
1804 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1805 		__intel_display_power_get_domain(dev_priv, domain);
1806 		is_enabled = true;
1807 	} else {
1808 		is_enabled = false;
1809 	}
1810 
1811 	mutex_unlock(&power_domains->lock);
1812 
1813 	if (!is_enabled)
1814 		intel_runtime_pm_put(dev_priv);
1815 
1816 	return is_enabled;
1817 }
1818 
1819 /**
1820  * intel_display_power_put - release a power domain reference
1821  * @dev_priv: i915 device instance
1822  * @domain: power domain to reference
1823  *
1824  * This function drops the power domain reference obtained by
1825  * intel_display_power_get() and might power down the corresponding hardware
1826  * block right away if this is the last reference.
1827  */
1828 void intel_display_power_put(struct drm_i915_private *dev_priv,
1829 			     enum intel_display_power_domain domain)
1830 {
1831 	struct i915_power_domains *power_domains;
1832 	struct i915_power_well *power_well;
1833 
1834 	power_domains = &dev_priv->power_domains;
1835 
1836 	mutex_lock(&power_domains->lock);
1837 
1838 	WARN(!power_domains->domain_use_count[domain],
1839 	     "Use count on domain %s is already zero\n",
1840 	     intel_display_power_domain_str(domain));
1841 	power_domains->domain_use_count[domain]--;
1842 
1843 	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
1844 		intel_power_well_put(dev_priv, power_well);
1845 
1846 	mutex_unlock(&power_domains->lock);
1847 
1848 	intel_runtime_pm_put(dev_priv);
1849 }
1850 
/*
 * Per-platform bitmasks of display power domains, used by the power well
 * tables below to describe which domains each power well serves. Note that
 * every mask also includes POWER_DOMAIN_INIT (referenced by
 * intel_power_domains_fini()).
 */
#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Same as HSW, minus the pipe A panel fitter domain. */
#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define I830_PIPES_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
1972 
/*
 * Ops for platforms without SW-controllable display power wells: sync_hw,
 * enable and disable are no-ops and the well always reads back as enabled.
 */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV pipe power well ops (used for the pipe A/disp2d well, see below). */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* CHV DPIO common lane power well ops; status is read via the VLV helper. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Single always-on well used as the fallback table for old platforms. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

/* I830: an always-on well plus a single well covering both pipes. */
static struct i915_power_well i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

/* "DC off" is modelled as a power well: enabling it disables DC states. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
2051 
/* HSW: always-on well plus the single "display" well. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

/* BDW: same layout as HSW, with the BDW domain mask. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2100 
/*
 * VLV power wells. Note that each of the four dpio-tx wells lists the
 * union of all TX lane domains, so a reference on any single lane domain
 * keeps all four TX wells enabled together.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
2158 
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.id = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		/* Common lanes shared by the B and C DDI/AUX ports. */
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		/* Common lanes for the D DDI/AUX port. */
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};
2190 
2191 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2192 				    int power_well_id)
2193 {
2194 	struct i915_power_well *power_well;
2195 	bool ret;
2196 
2197 	power_well = lookup_power_well(dev_priv, power_well_id);
2198 	ret = power_well->ops->is_enabled(dev_priv, power_well);
2199 
2200 	return ret;
2201 }
2202 
/*
 * SKL power wells, listed in enabling order; disabling happens in reverse
 * (see intel_power_domains_init()). PW1 and MISC IO carry no domains since
 * they are handled by the DMC firmware.
 */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = SKL_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
2262 
/* BXT power wells, in enabling order (disabling is reversed). */
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		/* No domains: handled by the DMC firmware (as on SKL). */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		.data = DPIO_PHY1,
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		.data = DPIO_PHY0,
	},
};
2303 
/* GLK power wells, in enabling order (disabling is reversed). */
static struct i915_power_well glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		/* GLK has a dedicated PHY per DDI, unlike BXT's shared BC PHY. */
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		.data = DPIO_PHY1,
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		.data = DPIO_PHY0,
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DPIO_CMN_C,
		.data = DPIO_PHY2,
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = GLK_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = GLK_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = GLK_DISP_PW_AUX_C,
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = GLK_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
};
2388 
/* CNL power wells, in enabling order (disabling is reversed). */
static struct i915_power_well cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_AUX_C,
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_AUX_D,
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
2464 
/*
 * Normalize the disable_power_well module option: any non-negative value is
 * clamped to 0/1, a negative value means "auto" and defaults to enabled (1).
 * @dev_priv is currently unused but kept for interface symmetry.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	return disable_power_well < 0 ? 1 : !!disable_power_well;
}
2474 
2475 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2476 				    int enable_dc)
2477 {
2478 	uint32_t mask;
2479 	int requested_dc;
2480 	int max_dc;
2481 
2482 	if (IS_GEN9_BC(dev_priv)) {
2483 		max_dc = 2;
2484 		mask = 0;
2485 	} else if (IS_GEN9_LP(dev_priv)) {
2486 		max_dc = 1;
2487 		/*
2488 		 * DC9 has a separate HW flow from the rest of the DC states,
2489 		 * not depending on the DMC firmware. It's needed by system
2490 		 * suspend/resume, so allow it unconditionally.
2491 		 */
2492 		mask = DC_STATE_EN_DC9;
2493 	} else {
2494 		max_dc = 0;
2495 		mask = 0;
2496 	}
2497 
2498 	if (!i915.disable_power_well)
2499 		max_dc = 0;
2500 
2501 	if (enable_dc >= 0 && enable_dc <= max_dc) {
2502 		requested_dc = enable_dc;
2503 	} else if (enable_dc == -1) {
2504 		requested_dc = max_dc;
2505 	} else if (enable_dc > max_dc && enable_dc <= 2) {
2506 		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2507 			      enable_dc, max_dc);
2508 		requested_dc = max_dc;
2509 	} else {
2510 		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2511 		requested_dc = max_dc;
2512 	}
2513 
2514 	if (requested_dc > 1)
2515 		mask |= DC_STATE_EN_UPTO_DC6;
2516 	if (requested_dc > 0)
2517 		mask |= DC_STATE_EN_UPTO_DC5;
2518 
2519 	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2520 
2521 	return mask;
2522 }
2523 
/*
 * Install a platform's power well table and its element count in
 * @power_domains. A macro (not a function) so that ARRAY_SIZE() still sees
 * the real array type of __power_wells.
 */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
2528 
2529 /**
2530  * intel_power_domains_init - initializes the power domain structures
2531  * @dev_priv: i915 device instance
2532  *
2533  * Initializes the power domain structures for @dev_priv depending upon the
2534  * supported platform.
2535  */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	/* Sanitize the module options before anything reads them. */
	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);
	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
							    i915.enable_dc);

	/* The domain masks are u64 bitmasks (BIT_ULL), so max 64 domains. */
	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_GEN9_BC(dev_priv)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CANNONLAKE(dev_priv)) {
		set_power_wells(power_domains, cnl_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_GEMINILAKE(dev_priv)) {
		set_power_wells(power_domains, glk_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		set_power_wells(power_domains, i830_power_wells);
	} else {
		/* Platforms without controllable wells: one always-on well. */
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
2577 
2578 /**
2579  * intel_power_domains_fini - finalizes the power domain structures
2580  * @dev_priv: i915 device instance
2581  *
2582  * Finalizes the power domain structures for @dev_priv depending upon the
2583  * supported platform. This function also disables runtime pm and ensures that
2584  * the device stays powered up so that the driver can be reloaded.
2585  */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	struct device *kdev = &dev_priv->drm.pdev->dev;

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(kdev);
}
2612 
2613 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2614 {
2615 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2616 	struct i915_power_well *power_well;
2617 
2618 	mutex_lock(&power_domains->lock);
2619 	for_each_power_well(dev_priv, power_well) {
2620 		power_well->ops->sync_hw(dev_priv, power_well);
2621 		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2622 								     power_well);
2623 	}
2624 	mutex_unlock(&power_domains->lock);
2625 }
2626 
2627 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2628 {
2629 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2630 	POSTING_READ(DBUF_CTL);
2631 
2632 	udelay(10);
2633 
2634 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2635 		DRM_ERROR("DBuf power enable timeout\n");
2636 }
2637 
2638 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2639 {
2640 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2641 	POSTING_READ(DBUF_CTL);
2642 
2643 	udelay(10);
2644 
2645 	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
2646 		DRM_ERROR("DBuf power disable timeout!\n");
2647 }
2648 
/*
 * Bring up the SKL display core: disable DC states, enable the PCH reset
 * handshake, power up PG1 and MISC IO, then CDCLK and DBuf, and reload the
 * DMC firmware on resume. The teardown counterpart is
 * skl_display_core_uninit().
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* On resume the DMC firmware payload must be re-programmed. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
2680 
/*
 * Tear down the SKL display core in the reverse order of
 * skl_display_core_init(): DBuf, CDCLK, then MISC IO and PG1.
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}
2705 
/*
 * Bring up the BXT display core: disable DC states, clear the PCH reset
 * handshake (BXT has no PCH), power up PG1, then CDCLK and DBuf, and
 * reload the DMC firmware on resume.
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* On resume the DMC firmware payload must be re-programmed. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
2740 
/*
 * Tear down the BXT display core in the reverse order of
 * bxt_display_core_init(): DBuf, CDCLK, then PG1.
 */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* Disable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}
2762 
/*
 * Map the process/voltage info bits (as read from CNL_PORT_COMP_DW3 in
 * cnl_display_core_init()) to a dense table index.
 */
#define CNL_PROCMON_IDX(val) \
	(((val) & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) >> VOLTAGE_INFO_SHIFT)
#define NUM_CNL_PROCMON \
	(CNL_PROCMON_IDX(VOLTAGE_INFO_MASK | PROCESS_INFO_MASK) + 1)

/*
 * Comp register values per process/voltage combination. Combinations not
 * listed here stay zero-initialized; cnl_display_core_init() WARNs if it
 * looks up such an entry (dw10 == 0).
 */
static const struct cnl_procmon {
	u32 dw1, dw9, dw10;
} cnl_procmon_values[NUM_CNL_PROCMON] = {
	[CNL_PROCMON_IDX(VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0)] =
		{ .dw1 = 0x00 << 16, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
	[CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0)] =
		{ .dw1 = 0x00 << 16, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
	[CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1)] =
		{ .dw1 = 0x00 << 16, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
	[CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0)] =
		{ .dw1 = 0x00 << 16, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
	[CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1)] =
		{ .dw1 = 0x44 << 16, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
2782 
/*
 * Bring up the CNL display core following the numbered HW sequence below:
 * PCH handshake, port comp programming (from cnl_procmon_values), PG1,
 * CDCLK and DBuf. Torn down by cnl_display_core_uninit().
 */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	const struct cnl_procmon *procmon;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val |= RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* 2. Enable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val &= ~COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);

	/* Pick the comp values matching the HW-reported process/voltage. */
	val = I915_READ(CNL_PORT_COMP_DW3);
	procmon = &cnl_procmon_values[CNL_PROCMON_IDX(val)];

	/* A zero dw10 means the combination has no table entry. */
	WARN_ON(procmon->dw10 == 0);

	val = I915_READ(CNL_PORT_COMP_DW1);
	val &= ~((0xff << 16) | 0xff);
	val |= procmon->dw1;
	I915_WRITE(CNL_PORT_COMP_DW1, val);

	I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
	I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);

	val = I915_READ(CNL_PORT_COMP_DW0);
	val |= COMP_INIT;
	I915_WRITE(CNL_PORT_COMP_DW0, val);

	/* 3. */
	val = I915_READ(CNL_PORT_CL1CM_DW5);
	val |= CL_POWER_DOWN_ENABLE;
	I915_WRITE(CNL_PORT_CL1CM_DW5, val);

	/* 4. Enable Power Well 1 (PG1) and Aux IO Power */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	cnl_init_cdclk(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);
}
2836 
/* The procmon index helpers are only needed by the table and init above. */
#undef CNL_PROCMON_IDX
#undef NUM_CNL_PROCMON
2839 
/*
 * CNL display core uninit sequence: mirror of cnl_display_core_init(),
 * tearing down DBUF, CD clock, Power Well 1 and finally powering down
 * the port compensation block.
 */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	cnl_uninit_cdclk(dev_priv);

	/* 4. Disable Power Well 1 (PG1) and Aux IO Power */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Disable Comp (power down the compensation block) */
	val = I915_READ(CHICKEN_MISC_2);
	val |= COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);
}
2867 
/*
 * Reconstruct the initial value of the dev_priv->chv_phy_control shadow
 * copy from the current common lane power well state and lane status,
 * then write it out. The register itself must never be read (see the
 * workaround note below).
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
2954 
/*
 * VLV common lane workaround: toggle the display PHY side reset by power
 * gating the common lane well and letting it be re-enabled later, unless
 * the display is already fully up (both wells enabled and DPIO_CMNRST
 * set), in which case the reset must not be disturbed.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
2982 
2983 /**
2984  * intel_power_domains_init_hw - initialize hardware power domain state
2985  * @dev_priv: i915 device instance
2986  * @resume: Called from resume code paths or not
2987  *
2988  * This function initializes the hardware power domain state and enables all
2989  * power wells belonging to the INIT power domain. Power wells in other
2990  * domains (and not in the INIT domain) are referenced or disabled during the
2991  * modeset state HW readout. After that the reference count of each power well
2992  * must match its HW enabled state, see intel_power_domains_verify_state().
2993  */
2994 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2995 {
2996 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2997 
2998 	power_domains->initializing = true;
2999 
3000 	if (IS_CANNONLAKE(dev_priv)) {
3001 		cnl_display_core_init(dev_priv, resume);
3002 	} else if (IS_GEN9_BC(dev_priv)) {
3003 		skl_display_core_init(dev_priv, resume);
3004 	} else if (IS_GEN9_LP(dev_priv)) {
3005 		bxt_display_core_init(dev_priv, resume);
3006 	} else if (IS_CHERRYVIEW(dev_priv)) {
3007 		mutex_lock(&power_domains->lock);
3008 		chv_phy_control_init(dev_priv);
3009 		mutex_unlock(&power_domains->lock);
3010 	} else if (IS_VALLEYVIEW(dev_priv)) {
3011 		mutex_lock(&power_domains->lock);
3012 		vlv_cmnlane_wa(dev_priv);
3013 		mutex_unlock(&power_domains->lock);
3014 	}
3015 
3016 	/* For now, we need the power well to be always enabled. */
3017 	intel_display_set_init_power(dev_priv, true);
3018 	/* Disable power support if the user asked so. */
3019 	if (!i915.disable_power_well)
3020 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3021 	intel_power_domains_sync_hw(dev_priv);
3022 	power_domains->initializing = false;
3023 }
3024 
3025 /**
3026  * intel_power_domains_suspend - suspend power domain state
3027  * @dev_priv: i915 device instance
3028  *
3029  * This function prepares the hardware power domain state before entering
3030  * system suspend. It must be paired with intel_power_domains_init_hw().
3031  */
3032 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
3033 {
3034 	/*
3035 	 * Even if power well support was disabled we still want to disable
3036 	 * power wells while we are system suspended.
3037 	 */
3038 	if (!i915.disable_power_well)
3039 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3040 
3041 	if (IS_CANNONLAKE(dev_priv))
3042 		cnl_display_core_uninit(dev_priv);
3043 	else if (IS_GEN9_BC(dev_priv))
3044 		skl_display_core_uninit(dev_priv);
3045 	else if (IS_GEN9_LP(dev_priv))
3046 		bxt_display_core_uninit(dev_priv);
3047 }
3048 
3049 static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
3050 {
3051 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3052 	struct i915_power_well *power_well;
3053 
3054 	for_each_power_well(dev_priv, power_well) {
3055 		enum intel_display_power_domain domain;
3056 
3057 		DRM_DEBUG_DRIVER("%-25s %d\n",
3058 				 power_well->name, power_well->count);
3059 
3060 		for_each_power_domain(domain, power_well->domains)
3061 			DRM_DEBUG_DRIVER("  %-23s %d\n",
3062 					 intel_display_power_domain_str(domain),
3063 					 power_domains->domain_use_count[domain]);
3064 	}
3065 }
3066 
3067 /**
3068  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
3069  * @dev_priv: i915 device instance
3070  *
3071  * Verify if the reference count of each power well matches its HW enabled
3072  * state and the total refcount of the domains it belongs to. This must be
3073  * called after modeset HW state sanitization, which is responsible for
3074  * acquiring reference counts for any power wells in use and disabling the
3075  * ones left on by BIOS but not required by any active output.
3076  */
3077 void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
3078 {
3079 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3080 	struct i915_power_well *power_well;
3081 	bool dump_domain_info;
3082 
3083 	mutex_lock(&power_domains->lock);
3084 
3085 	dump_domain_info = false;
3086 	for_each_power_well(dev_priv, power_well) {
3087 		enum intel_display_power_domain domain;
3088 		int domains_count;
3089 		bool enabled;
3090 
3091 		/*
3092 		 * Power wells not belonging to any domain (like the MISC_IO
3093 		 * and PW1 power wells) are under FW control, so ignore them,
3094 		 * since their state can change asynchronously.
3095 		 */
3096 		if (!power_well->domains)
3097 			continue;
3098 
3099 		enabled = power_well->ops->is_enabled(dev_priv, power_well);
3100 		if ((power_well->count || power_well->always_on) != enabled)
3101 			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
3102 				  power_well->name, power_well->count, enabled);
3103 
3104 		domains_count = 0;
3105 		for_each_power_domain(domain, power_well->domains)
3106 			domains_count += power_domains->domain_use_count[domain];
3107 
3108 		if (power_well->count != domains_count) {
3109 			DRM_ERROR("power well %s refcount/domain refcount mismatch "
3110 				  "(refcount %d/domains refcount %d)\n",
3111 				  power_well->name, power_well->count,
3112 				  domains_count);
3113 			dump_domain_info = true;
3114 		}
3115 	}
3116 
3117 	if (dump_domain_info) {
3118 		static bool dumped;
3119 
3120 		if (!dumped) {
3121 			intel_power_domains_dump_info(dev_priv);
3122 			dumped = true;
3123 		}
3124 	}
3125 
3126 	mutex_unlock(&power_domains->lock);
3127 }
3128 
3129 /**
3130  * intel_runtime_pm_get - grab a runtime pm reference
3131  * @dev_priv: i915 device instance
3132  *
3133  * This function grabs a device-level runtime pm reference (mostly used for GEM
3134  * code to ensure the GTT or GT is on) and ensures that it is powered up.
3135  *
3136  * Any runtime pm reference obtained by this function must have a symmetric
3137  * call to intel_runtime_pm_put() to release the reference again.
3138  */
3139 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
3140 {
3141 	struct pci_dev *pdev = dev_priv->drm.pdev;
3142 	struct device *kdev = &pdev->dev;
3143 	int ret;
3144 
3145 	ret = pm_runtime_get_sync(kdev);
3146 	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3147 
3148 	atomic_inc(&dev_priv->pm.wakeref_count);
3149 	assert_rpm_wakelock_held(dev_priv);
3150 }
3151 
3152 /**
3153  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
3154  * @dev_priv: i915 device instance
3155  *
3156  * This function grabs a device-level runtime pm reference if the device is
3157  * already in use and ensures that it is powered up.
3158  *
3159  * Any runtime pm reference obtained by this function must have a symmetric
3160  * call to intel_runtime_pm_put() to release the reference again.
3161  */
3162 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
3163 {
3164 	struct pci_dev *pdev = dev_priv->drm.pdev;
3165 	struct device *kdev = &pdev->dev;
3166 
3167 	if (IS_ENABLED(CONFIG_PM)) {
3168 		int ret = pm_runtime_get_if_in_use(kdev);
3169 
3170 		/*
3171 		 * In cases runtime PM is disabled by the RPM core and we get
3172 		 * an -EINVAL return value we are not supposed to call this
3173 		 * function, since the power state is undefined. This applies
3174 		 * atm to the late/early system suspend/resume handlers.
3175 		 */
3176 		WARN_ONCE(ret < 0,
3177 			  "pm_runtime_get_if_in_use() failed: %d\n", ret);
3178 		if (ret <= 0)
3179 			return false;
3180 	}
3181 
3182 	atomic_inc(&dev_priv->pm.wakeref_count);
3183 	assert_rpm_wakelock_held(dev_priv);
3184 
3185 	return true;
3186 }
3187 
3188 /**
3189  * intel_runtime_pm_get_noresume - grab a runtime pm reference
3190  * @dev_priv: i915 device instance
3191  *
3192  * This function grabs a device-level runtime pm reference (mostly used for GEM
3193  * code to ensure the GTT or GT is on).
3194  *
3195  * It will _not_ power up the device but instead only check that it's powered
3196  * on.  Therefore it is only valid to call this functions from contexts where
3197  * the device is known to be powered up and where trying to power it up would
3198  * result in hilarity and deadlocks. That pretty much means only the system
3199  * suspend/resume code where this is used to grab runtime pm references for
3200  * delayed setup down in work items.
3201  *
3202  * Any runtime pm reference obtained by this function must have a symmetric
3203  * call to intel_runtime_pm_put() to release the reference again.
3204  */
3205 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
3206 {
3207 	struct pci_dev *pdev = dev_priv->drm.pdev;
3208 	struct device *kdev = &pdev->dev;
3209 
3210 	assert_rpm_wakelock_held(dev_priv);
3211 	pm_runtime_get_noresume(kdev);
3212 
3213 	atomic_inc(&dev_priv->pm.wakeref_count);
3214 }
3215 
3216 /**
3217  * intel_runtime_pm_put - release a runtime pm reference
3218  * @dev_priv: i915 device instance
3219  *
3220  * This function drops the device-level runtime pm reference obtained by
3221  * intel_runtime_pm_get() and might power down the corresponding
3222  * hardware block right away if this is the last reference.
3223  */
3224 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
3225 {
3226 	struct pci_dev *pdev = dev_priv->drm.pdev;
3227 	struct device *kdev = &pdev->dev;
3228 
3229 	assert_rpm_wakelock_held(dev_priv);
3230 	atomic_dec(&dev_priv->pm.wakeref_count);
3231 
3232 	pm_runtime_mark_last_busy(kdev);
3233 	pm_runtime_put_autosuspend(kdev);
3234 }
3235 
3236 /**
3237  * intel_runtime_pm_enable - enable runtime pm
3238  * @dev_priv: i915 device instance
3239  *
3240  * This function enables runtime pm at the end of the driver load sequence.
3241  *
3242  * Note that this function does currently not enable runtime pm for the
3243  * subordinate display power domains. That is only done on the first modeset
3244  * using intel_display_set_init_power().
3245  */
3246 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
3247 {
3248 	struct pci_dev *pdev = dev_priv->drm.pdev;
3249 	struct device *kdev = &pdev->dev;
3250 
3251 	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
3252 	pm_runtime_mark_last_busy(kdev);
3253 
3254 	/*
3255 	 * Take a permanent reference to disable the RPM functionality and drop
3256 	 * it only when unloading the driver. Use the low level get/put helpers,
3257 	 * so the driver's own RPM reference tracking asserts also work on
3258 	 * platforms without RPM support.
3259 	 */
3260 	if (!HAS_RUNTIME_PM(dev_priv)) {
3261 		int ret;
3262 
3263 		pm_runtime_dont_use_autosuspend(kdev);
3264 		ret = pm_runtime_get_sync(kdev);
3265 		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3266 	} else {
3267 		pm_runtime_use_autosuspend(kdev);
3268 	}
3269 
3270 	/*
3271 	 * The core calls the driver load handler with an RPM reference held.
3272 	 * We drop that here and will reacquire it during unloading in
3273 	 * intel_power_domains_fini().
3274 	 */
3275 	pm_runtime_put_autosuspend(kdev);
3276 }
3277