1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28 
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31 
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34 
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * But even there some manual control at the device level is required.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
51 
52 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
53 				    int power_well_id);
54 
55 static struct i915_power_well *
56 lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);
57 
58 const char *
59 intel_display_power_domain_str(enum intel_display_power_domain domain)
60 {
61 	switch (domain) {
62 	case POWER_DOMAIN_PIPE_A:
63 		return "PIPE_A";
64 	case POWER_DOMAIN_PIPE_B:
65 		return "PIPE_B";
66 	case POWER_DOMAIN_PIPE_C:
67 		return "PIPE_C";
68 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
69 		return "PIPE_A_PANEL_FITTER";
70 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
71 		return "PIPE_B_PANEL_FITTER";
72 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
73 		return "PIPE_C_PANEL_FITTER";
74 	case POWER_DOMAIN_TRANSCODER_A:
75 		return "TRANSCODER_A";
76 	case POWER_DOMAIN_TRANSCODER_B:
77 		return "TRANSCODER_B";
78 	case POWER_DOMAIN_TRANSCODER_C:
79 		return "TRANSCODER_C";
80 	case POWER_DOMAIN_TRANSCODER_EDP:
81 		return "TRANSCODER_EDP";
82 	case POWER_DOMAIN_TRANSCODER_DSI_A:
83 		return "TRANSCODER_DSI_A";
84 	case POWER_DOMAIN_TRANSCODER_DSI_C:
85 		return "TRANSCODER_DSI_C";
86 	case POWER_DOMAIN_PORT_DDI_A_LANES:
87 		return "PORT_DDI_A_LANES";
88 	case POWER_DOMAIN_PORT_DDI_B_LANES:
89 		return "PORT_DDI_B_LANES";
90 	case POWER_DOMAIN_PORT_DDI_C_LANES:
91 		return "PORT_DDI_C_LANES";
92 	case POWER_DOMAIN_PORT_DDI_D_LANES:
93 		return "PORT_DDI_D_LANES";
94 	case POWER_DOMAIN_PORT_DDI_E_LANES:
95 		return "PORT_DDI_E_LANES";
96 	case POWER_DOMAIN_PORT_DDI_A_IO:
97 		return "PORT_DDI_A_IO";
98 	case POWER_DOMAIN_PORT_DDI_B_IO:
99 		return "PORT_DDI_B_IO";
100 	case POWER_DOMAIN_PORT_DDI_C_IO:
101 		return "PORT_DDI_C_IO";
102 	case POWER_DOMAIN_PORT_DDI_D_IO:
103 		return "PORT_DDI_D_IO";
104 	case POWER_DOMAIN_PORT_DDI_E_IO:
105 		return "PORT_DDI_E_IO";
106 	case POWER_DOMAIN_PORT_DSI:
107 		return "PORT_DSI";
108 	case POWER_DOMAIN_PORT_CRT:
109 		return "PORT_CRT";
110 	case POWER_DOMAIN_PORT_OTHER:
111 		return "PORT_OTHER";
112 	case POWER_DOMAIN_VGA:
113 		return "VGA";
114 	case POWER_DOMAIN_AUDIO:
115 		return "AUDIO";
116 	case POWER_DOMAIN_PLLS:
117 		return "PLLS";
118 	case POWER_DOMAIN_AUX_A:
119 		return "AUX_A";
120 	case POWER_DOMAIN_AUX_B:
121 		return "AUX_B";
122 	case POWER_DOMAIN_AUX_C:
123 		return "AUX_C";
124 	case POWER_DOMAIN_AUX_D:
125 		return "AUX_D";
126 	case POWER_DOMAIN_GMBUS:
127 		return "GMBUS";
128 	case POWER_DOMAIN_INIT:
129 		return "INIT";
130 	case POWER_DOMAIN_MODESET:
131 		return "MODESET";
132 	default:
133 		MISSING_CASE(domain);
134 		return "?";
135 	}
136 }
137 
138 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
139 				    struct i915_power_well *power_well)
140 {
141 	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
142 	power_well->ops->enable(dev_priv, power_well);
143 	power_well->hw_enabled = true;
144 }
145 
146 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
147 				     struct i915_power_well *power_well)
148 {
149 	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
150 	power_well->hw_enabled = false;
151 	power_well->ops->disable(dev_priv, power_well);
152 }
153 
154 static void intel_power_well_get(struct drm_i915_private *dev_priv,
155 				 struct i915_power_well *power_well)
156 {
157 	if (!power_well->count++)
158 		intel_power_well_enable(dev_priv, power_well);
159 }
160 
161 static void intel_power_well_put(struct drm_i915_private *dev_priv,
162 				 struct i915_power_well *power_well)
163 {
164 	WARN(!power_well->count, "Use count on power well %s is already zero",
165 	     power_well->name);
166 
167 	if (!--power_well->count)
168 		intel_power_well_disable(dev_priv, power_well);
169 }
170 
171 /*
172  * We should only use the power well if we explicitly asked the hardware to
173  * enable it, so check if it's enabled and also check if we've requested it to
174  * be enabled.
175  */
176 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
177 				   struct i915_power_well *power_well)
178 {
179 	return I915_READ(HSW_PWR_WELL_DRIVER) ==
180 		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
181 }
182 
183 /**
184  * __intel_display_power_is_enabled - unlocked check for a power domain
185  * @dev_priv: i915 device instance
186  * @domain: power domain to check
187  *
188  * This is the unlocked version of intel_display_power_is_enabled() and should
189  * only be used from error capture and recovery code where deadlocks are
190  * possible.
191  *
192  * Returns:
193  * True when the power domain is enabled, false otherwise.
194  */
195 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
196 				      enum intel_display_power_domain domain)
197 {
198 	struct i915_power_well *power_well;
199 	bool is_enabled;
200 
201 	if (dev_priv->pm.suspended)
202 		return false;
203 
204 	is_enabled = true;
205 
206 	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
207 		if (power_well->always_on)
208 			continue;
209 
210 		if (!power_well->hw_enabled) {
211 			is_enabled = false;
212 			break;
213 		}
214 	}
215 
216 	return is_enabled;
217 }
218 
219 /**
220  * intel_display_power_is_enabled - check for a power domain
221  * @dev_priv: i915 device instance
222  * @domain: power domain to check
223  *
224  * This function can be used to check the hw power domain state. It is mostly
225  * used in hardware state readout functions. Everywhere else code should rely
226  * upon explicit power domain reference counting to ensure that the hardware
227  * block is powered up before accessing it.
228  *
229  * Callers must hold the relevant modesetting locks to ensure that concurrent
230  * threads can't disable the power well while the caller tries to read a few
231  * registers.
232  *
233  * Returns:
234  * True when the power domain is enabled, false otherwise.
235  */
236 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
237 				    enum intel_display_power_domain domain)
238 {
239 	struct i915_power_domains *power_domains;
240 	bool ret;
241 
242 	power_domains = &dev_priv->power_domains;
243 
244 	mutex_lock(&power_domains->lock);
245 	ret = __intel_display_power_is_enabled(dev_priv, domain);
246 	mutex_unlock(&power_domains->lock);
247 
248 	return ret;
249 }
250 
251 /**
252  * intel_display_set_init_power - set the initial power domain state
253  * @dev_priv: i915 device instance
254  * @enable: whether to enable or disable the initial power domain state
255  *
256  * For simplicity our driver load/unload and system suspend/resume code assumes
257  * that all power domains are always enabled. This functions controls the state
258  * of this little hack. While the initial power domain state is enabled runtime
259  * pm is effectively disabled.
260  */
261 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
262 				  bool enable)
263 {
264 	if (dev_priv->power_domains.init_power_on == enable)
265 		return;
266 
267 	if (enable)
268 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
269 	else
270 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
271 
272 	dev_priv->power_domains.init_power_on = enable;
273 }
274 
275 /*
276  * Starting with Haswell, we have a "Power Down Well" that can be turned off
277  * when not needed anymore. We have 4 registers that can request the power well
278  * to be enabled, and it will only be disabled if none of the registers is
279  * requesting it to be enabled.
280  */
281 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
282 {
283 	struct pci_dev *pdev = dev_priv->drm.pdev;
284 
285 	/*
286 	 * After we re-enable the power well, if we touch VGA register 0x3d5
287 	 * we'll get unclaimed register interrupts. This stops after we write
288 	 * anything to the VGA MSR register. The vgacon module uses this
289 	 * register all the time, so if we unbind our driver and, as a
290 	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
291 	 * console_unlock(). So make here we touch the VGA MSR register, making
292 	 * sure vgacon can keep working normally without triggering interrupts
293 	 * and error messages.
294 	 */
295 	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
296 	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
297 	vga_put(pdev, VGA_RSRC_LEGACY_IO);
298 
299 	if (IS_BROADWELL(dev_priv))
300 		gen8_irq_power_well_post_enable(dev_priv,
301 						1 << PIPE_C | 1 << PIPE_B);
302 }
303 
304 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
305 {
306 	if (IS_BROADWELL(dev_priv))
307 		gen8_irq_power_well_pre_disable(dev_priv,
308 						1 << PIPE_C | 1 << PIPE_B);
309 }
310 
311 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
312 				       struct i915_power_well *power_well)
313 {
314 	struct pci_dev *pdev = dev_priv->drm.pdev;
315 
316 	/*
317 	 * After we re-enable the power well, if we touch VGA register 0x3d5
318 	 * we'll get unclaimed register interrupts. This stops after we write
319 	 * anything to the VGA MSR register. The vgacon module uses this
320 	 * register all the time, so if we unbind our driver and, as a
321 	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
322 	 * console_unlock(). So make here we touch the VGA MSR register, making
323 	 * sure vgacon can keep working normally without triggering interrupts
324 	 * and error messages.
325 	 */
326 	if (power_well->id == SKL_DISP_PW_2) {
327 		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
328 		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
329 		vga_put(pdev, VGA_RSRC_LEGACY_IO);
330 
331 		gen8_irq_power_well_post_enable(dev_priv,
332 						1 << PIPE_C | 1 << PIPE_B);
333 	}
334 }
335 
336 static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
337 				       struct i915_power_well *power_well)
338 {
339 	if (power_well->id == SKL_DISP_PW_2)
340 		gen8_irq_power_well_pre_disable(dev_priv,
341 						1 << PIPE_C | 1 << PIPE_B);
342 }
343 
344 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
345 			       struct i915_power_well *power_well, bool enable)
346 {
347 	bool is_enabled, enable_requested;
348 	uint32_t tmp;
349 
350 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
351 	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
352 	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
353 
354 	if (enable) {
355 		if (!enable_requested)
356 			I915_WRITE(HSW_PWR_WELL_DRIVER,
357 				   HSW_PWR_WELL_ENABLE_REQUEST);
358 
359 		if (!is_enabled) {
360 			DRM_DEBUG_KMS("Enabling power well\n");
361 			if (intel_wait_for_register(dev_priv,
362 						    HSW_PWR_WELL_DRIVER,
363 						    HSW_PWR_WELL_STATE_ENABLED,
364 						    HSW_PWR_WELL_STATE_ENABLED,
365 						    20))
366 				DRM_ERROR("Timeout enabling power well\n");
367 			hsw_power_well_post_enable(dev_priv);
368 		}
369 
370 	} else {
371 		if (enable_requested) {
372 			hsw_power_well_pre_disable(dev_priv);
373 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
374 			POSTING_READ(HSW_PWR_WELL_DRIVER);
375 			DRM_DEBUG_KMS("Requesting to disable the power well\n");
376 		}
377 	}
378 }
379 
380 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
381 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
382 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
383 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
384 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
385 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
386 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
387 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
388 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
389 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
390 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
391 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
392 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
393 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
394 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
395 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
396 	BIT_ULL(POWER_DOMAIN_VGA) |				\
397 	BIT_ULL(POWER_DOMAIN_INIT))
398 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
399 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
400 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
401 	BIT_ULL(POWER_DOMAIN_INIT))
402 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
403 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
404 	BIT_ULL(POWER_DOMAIN_INIT))
405 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
406 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
407 	BIT_ULL(POWER_DOMAIN_INIT))
408 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
409 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
410 	BIT_ULL(POWER_DOMAIN_INIT))
411 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
412 	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
413 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
414 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
415 	BIT_ULL(POWER_DOMAIN_INIT))
416 
417 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
418 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
419 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
420 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
421 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
422 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
423 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
424 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
425 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
426 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
427 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
428 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
429 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
430 	BIT_ULL(POWER_DOMAIN_VGA) |				\
431 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
432 	BIT_ULL(POWER_DOMAIN_INIT))
433 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
434 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
435 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
436 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
437 	BIT_ULL(POWER_DOMAIN_INIT))
438 #define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
439 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
440 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
441 	BIT_ULL(POWER_DOMAIN_INIT))
442 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
443 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
444 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
445 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
446 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
447 	BIT_ULL(POWER_DOMAIN_INIT))
448 
449 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
450 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
451 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
452 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
453 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
454 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
455 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
456 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
457 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
458 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
459 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
460 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
461 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
462 	BIT_ULL(POWER_DOMAIN_VGA) |				\
463 	BIT_ULL(POWER_DOMAIN_INIT))
464 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
465 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
466 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
467 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
468 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
469 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
470 #define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
471 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
472 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
473 	BIT_ULL(POWER_DOMAIN_INIT))
474 #define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
475 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
476 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
477 	BIT_ULL(POWER_DOMAIN_INIT))
478 #define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
479 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
480 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
481 	BIT_ULL(POWER_DOMAIN_INIT))
482 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
483 	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
484 	BIT_ULL(POWER_DOMAIN_INIT))
485 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
486 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
487 	BIT_ULL(POWER_DOMAIN_INIT))
488 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
489 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
490 	BIT_ULL(POWER_DOMAIN_INIT))
491 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
492 	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
493 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
494 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
495 	BIT_ULL(POWER_DOMAIN_INIT))
496 
497 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
498 {
499 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
500 		  "DC9 already programmed to be enabled.\n");
501 	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
502 		  "DC5 still not disabled to enable DC9.\n");
503 	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
504 	WARN_ONCE(intel_irqs_enabled(dev_priv),
505 		  "Interrupts not disabled yet.\n");
506 
507 	 /*
508 	  * TODO: check for the following to verify the conditions to enter DC9
509 	  * state are satisfied:
510 	  * 1] Check relevant display engine registers to verify if mode set
511 	  * disable sequence was followed.
512 	  * 2] Check if display uninitialize sequence is initialized.
513 	  */
514 }
515 
516 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
517 {
518 	WARN_ONCE(intel_irqs_enabled(dev_priv),
519 		  "Interrupts not disabled yet.\n");
520 	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
521 		  "DC5 still not disabled.\n");
522 
523 	 /*
524 	  * TODO: check for the following to verify DC9 state was indeed
525 	  * entered before programming to disable it:
526 	  * 1] Check relevant display engine registers to verify if mode
527 	  *  set disable sequence was followed.
528 	  * 2] Check if display uninitialize sequence is initialized.
529 	  */
530 }
531 
532 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
533 				u32 state)
534 {
535 	int rewrites = 0;
536 	int rereads = 0;
537 	u32 v;
538 
539 	I915_WRITE(DC_STATE_EN, state);
540 
541 	/* It has been observed that disabling the dc6 state sometimes
542 	 * doesn't stick and dmc keeps returning old value. Make sure
543 	 * the write really sticks enough times and also force rewrite until
544 	 * we are confident that state is exactly what we want.
545 	 */
546 	do  {
547 		v = I915_READ(DC_STATE_EN);
548 
549 		if (v != state) {
550 			I915_WRITE(DC_STATE_EN, state);
551 			rewrites++;
552 			rereads = 0;
553 		} else if (rereads++ > 5) {
554 			break;
555 		}
556 
557 	} while (rewrites < 100);
558 
559 	if (v != state)
560 		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
561 			  state, v);
562 
563 	/* Most of the times we need one retry, avoid spam */
564 	if (rewrites > 1)
565 		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
566 			      state, rewrites);
567 }
568 
569 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
570 {
571 	u32 mask;
572 
573 	mask = DC_STATE_EN_UPTO_DC5;
574 	if (IS_GEN9_LP(dev_priv))
575 		mask |= DC_STATE_EN_DC9;
576 	else
577 		mask |= DC_STATE_EN_UPTO_DC6;
578 
579 	return mask;
580 }
581 
582 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
583 {
584 	u32 val;
585 
586 	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
587 
588 	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
589 		      dev_priv->csr.dc_state, val);
590 	dev_priv->csr.dc_state = val;
591 }
592 
593 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
594 {
595 	uint32_t val;
596 	uint32_t mask;
597 
598 	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
599 		state &= dev_priv->csr.allowed_dc_mask;
600 
601 	val = I915_READ(DC_STATE_EN);
602 	mask = gen9_dc_mask(dev_priv);
603 	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
604 		      val & mask, state);
605 
606 	/* Check if DMC is ignoring our DC state requests */
607 	if ((val & mask) != dev_priv->csr.dc_state)
608 		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
609 			  dev_priv->csr.dc_state, val & mask);
610 
611 	val &= ~mask;
612 	val |= state;
613 
614 	gen9_write_dc_state(dev_priv, val);
615 
616 	dev_priv->csr.dc_state = val & mask;
617 }
618 
619 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
620 {
621 	assert_can_enable_dc9(dev_priv);
622 
623 	DRM_DEBUG_KMS("Enabling DC9\n");
624 
625 	intel_power_sequencer_reset(dev_priv);
626 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
627 }
628 
629 void bxt_disable_dc9(struct drm_i915_private *dev_priv)
630 {
631 	assert_can_disable_dc9(dev_priv);
632 
633 	DRM_DEBUG_KMS("Disabling DC9\n");
634 
635 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
636 
637 	intel_pps_unlock_regs_wa(dev_priv);
638 }
639 
640 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
641 {
642 	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
643 		  "CSR program storage start is NULL\n");
644 	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
645 	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
646 }
647 
648 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
649 {
650 	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
651 					SKL_DISP_PW_2);
652 
653 	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
654 
655 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
656 		  "DC5 already programmed to be enabled.\n");
657 	assert_rpm_wakelock_held(dev_priv);
658 
659 	assert_csr_loaded(dev_priv);
660 }
661 
662 void gen9_enable_dc5(struct drm_i915_private *dev_priv)
663 {
664 	assert_can_enable_dc5(dev_priv);
665 
666 	DRM_DEBUG_KMS("Enabling DC5\n");
667 
668 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
669 }
670 
671 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
672 {
673 	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
674 		  "Backlight is not disabled.\n");
675 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
676 		  "DC6 already programmed to be enabled.\n");
677 
678 	assert_csr_loaded(dev_priv);
679 }
680 
681 void skl_enable_dc6(struct drm_i915_private *dev_priv)
682 {
683 	assert_can_enable_dc6(dev_priv);
684 
685 	DRM_DEBUG_KMS("Enabling DC6\n");
686 
687 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
688 
689 }
690 
691 void skl_disable_dc6(struct drm_i915_private *dev_priv)
692 {
693 	DRM_DEBUG_KMS("Disabling DC6\n");
694 
695 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
696 }
697 
698 static void
699 gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
700 				  struct i915_power_well *power_well)
701 {
702 	enum skl_disp_power_wells power_well_id = power_well->id;
703 	u32 val;
704 	u32 mask;
705 
706 	mask = SKL_POWER_WELL_REQ(power_well_id);
707 
708 	val = I915_READ(HSW_PWR_WELL_KVMR);
709 	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
710 		      power_well->name))
711 		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);
712 
713 	val = I915_READ(HSW_PWR_WELL_BIOS);
714 	val |= I915_READ(HSW_PWR_WELL_DEBUG);
715 
716 	if (!(val & mask))
717 		return;
718 
719 	/*
720 	 * DMC is known to force on the request bits for power well 1 on SKL
721 	 * and BXT and the misc IO power well on SKL but we don't expect any
722 	 * other request bits to be set, so WARN for those.
723 	 */
724 	if (power_well_id == SKL_DISP_PW_1 ||
725 	    (IS_GEN9_BC(dev_priv) &&
726 	     power_well_id == SKL_DISP_PW_MISC_IO))
727 		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
728 				 "by DMC\n", power_well->name);
729 	else
730 		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
731 			  power_well->name);
732 
733 	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
734 	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
735 }
736 
737 static void skl_set_power_well(struct drm_i915_private *dev_priv,
738 			       struct i915_power_well *power_well, bool enable)
739 {
740 	uint32_t tmp, fuse_status;
741 	uint32_t req_mask, state_mask;
742 	bool is_enabled, enable_requested, check_fuse_status = false;
743 
744 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
745 	fuse_status = I915_READ(SKL_FUSE_STATUS);
746 
747 	switch (power_well->id) {
748 	case SKL_DISP_PW_1:
749 		if (intel_wait_for_register(dev_priv,
750 					    SKL_FUSE_STATUS,
751 					    SKL_FUSE_PG0_DIST_STATUS,
752 					    SKL_FUSE_PG0_DIST_STATUS,
753 					    1)) {
754 			DRM_ERROR("PG0 not enabled\n");
755 			return;
756 		}
757 		break;
758 	case SKL_DISP_PW_2:
759 		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
760 			DRM_ERROR("PG1 in disabled state\n");
761 			return;
762 		}
763 		break;
764 	case SKL_DISP_PW_MISC_IO:
765 	case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A */
766 	case SKL_DISP_PW_DDI_B:
767 	case SKL_DISP_PW_DDI_C:
768 	case SKL_DISP_PW_DDI_D:
769 	case GLK_DISP_PW_AUX_A:
770 	case GLK_DISP_PW_AUX_B:
771 	case GLK_DISP_PW_AUX_C:
772 		break;
773 	default:
774 		WARN(1, "Unknown power well %lu\n", power_well->id);
775 		return;
776 	}
777 
778 	req_mask = SKL_POWER_WELL_REQ(power_well->id);
779 	enable_requested = tmp & req_mask;
780 	state_mask = SKL_POWER_WELL_STATE(power_well->id);
781 	is_enabled = tmp & state_mask;
782 
783 	if (!enable && enable_requested)
784 		skl_power_well_pre_disable(dev_priv, power_well);
785 
786 	if (enable) {
787 		if (!enable_requested) {
788 			WARN((tmp & state_mask) &&
789 				!I915_READ(HSW_PWR_WELL_BIOS),
790 				"Invalid for power well status to be enabled, unless done by the BIOS, \
791 				when request is to disable!\n");
792 			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
793 		}
794 
795 		if (!is_enabled) {
796 			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
797 			check_fuse_status = true;
798 		}
799 	} else {
800 		if (enable_requested) {
801 			I915_WRITE(HSW_PWR_WELL_DRIVER,	tmp & ~req_mask);
802 			POSTING_READ(HSW_PWR_WELL_DRIVER);
803 			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
804 		}
805 
806 		if (IS_GEN9(dev_priv))
807 			gen9_sanitize_power_well_requests(dev_priv, power_well);
808 	}
809 
810 	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
811 		     1))
812 		DRM_ERROR("%s %s timeout\n",
813 			  power_well->name, enable ? "enable" : "disable");
814 
815 	if (check_fuse_status) {
816 		if (power_well->id == SKL_DISP_PW_1) {
817 			if (intel_wait_for_register(dev_priv,
818 						    SKL_FUSE_STATUS,
819 						    SKL_FUSE_PG1_DIST_STATUS,
820 						    SKL_FUSE_PG1_DIST_STATUS,
821 						    1))
822 				DRM_ERROR("PG1 distributing status timeout\n");
823 		} else if (power_well->id == SKL_DISP_PW_2) {
824 			if (intel_wait_for_register(dev_priv,
825 						    SKL_FUSE_STATUS,
826 						    SKL_FUSE_PG2_DIST_STATUS,
827 						    SKL_FUSE_PG2_DIST_STATUS,
828 						    1))
829 				DRM_ERROR("PG2 distributing status timeout\n");
830 		}
831 	}
832 
833 	if (enable && !is_enabled)
834 		skl_power_well_post_enable(dev_priv, power_well);
835 }
836 
837 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
838 				   struct i915_power_well *power_well)
839 {
840 	/* Take over the request bit if set by BIOS. */
841 	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) {
842 		if (!(I915_READ(HSW_PWR_WELL_DRIVER) &
843 		      HSW_PWR_WELL_ENABLE_REQUEST))
844 			I915_WRITE(HSW_PWR_WELL_DRIVER,
845 				   HSW_PWR_WELL_ENABLE_REQUEST);
846 		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
847 	}
848 }
849 
850 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
851 				  struct i915_power_well *power_well)
852 {
853 	hsw_set_power_well(dev_priv, power_well, true);
854 }
855 
856 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
857 				   struct i915_power_well *power_well)
858 {
859 	hsw_set_power_well(dev_priv, power_well, false);
860 }
861 
862 static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
863 					struct i915_power_well *power_well)
864 {
865 	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
866 		SKL_POWER_WELL_STATE(power_well->id);
867 
868 	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
869 }
870 
871 static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
872 				struct i915_power_well *power_well)
873 {
874 	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id);
875 	uint32_t bios_req = I915_READ(HSW_PWR_WELL_BIOS);
876 
877 	/* Take over the request bit if set by BIOS. */
878 	if (bios_req & mask) {
879 		uint32_t drv_req = I915_READ(HSW_PWR_WELL_DRIVER);
880 
881 		if (!(drv_req & mask))
882 			I915_WRITE(HSW_PWR_WELL_DRIVER, drv_req | mask);
883 		I915_WRITE(HSW_PWR_WELL_BIOS, bios_req & ~mask);
884 	}
885 }
886 
887 static void skl_power_well_enable(struct drm_i915_private *dev_priv,
888 				struct i915_power_well *power_well)
889 {
890 	skl_set_power_well(dev_priv, power_well, true);
891 }
892 
893 static void skl_power_well_disable(struct drm_i915_private *dev_priv,
894 				struct i915_power_well *power_well)
895 {
896 	skl_set_power_well(dev_priv, power_well, false);
897 }
898 
899 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
900 					   struct i915_power_well *power_well)
901 {
902 	bxt_ddi_phy_init(dev_priv, power_well->data);
903 }
904 
905 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
906 					    struct i915_power_well *power_well)
907 {
908 	bxt_ddi_phy_uninit(dev_priv, power_well->data);
909 }
910 
911 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
912 					    struct i915_power_well *power_well)
913 {
914 	return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
915 }
916 
917 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
918 {
919 	struct i915_power_well *power_well;
920 
921 	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
922 	if (power_well->count > 0)
923 		bxt_ddi_phy_verify_state(dev_priv, power_well->data);
924 
925 	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
926 	if (power_well->count > 0)
927 		bxt_ddi_phy_verify_state(dev_priv, power_well->data);
928 
929 	if (IS_GEMINILAKE(dev_priv)) {
930 		power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
931 		if (power_well->count > 0)
932 			bxt_ddi_phy_verify_state(dev_priv, power_well->data);
933 	}
934 }
935 
936 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
937 					   struct i915_power_well *power_well)
938 {
939 	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
940 }
941 
942 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
943 {
944 	u32 tmp = I915_READ(DBUF_CTL);
945 
946 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
947 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
948 	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
949 }
950 
951 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
952 					  struct i915_power_well *power_well)
953 {
954 	struct intel_cdclk_state cdclk_state = {};
955 
956 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
957 
958 	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
959 	WARN_ON(!intel_cdclk_state_compare(&dev_priv->cdclk.hw, &cdclk_state));
960 
961 	gen9_assert_dbuf_enabled(dev_priv);
962 
963 	if (IS_GEN9_LP(dev_priv))
964 		bxt_verify_ddi_phy_power_wells(dev_priv);
965 }
966 
967 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
968 					   struct i915_power_well *power_well)
969 {
970 	if (!dev_priv->csr.dmc_payload)
971 		return;
972 
973 	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
974 		skl_enable_dc6(dev_priv);
975 	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
976 		gen9_enable_dc5(dev_priv);
977 }
978 
979 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
980 					 struct i915_power_well *power_well)
981 {
982 }
983 
984 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
985 					   struct i915_power_well *power_well)
986 {
987 }
988 
989 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
990 					     struct i915_power_well *power_well)
991 {
992 	return true;
993 }
994 
995 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
996 			       struct i915_power_well *power_well, bool enable)
997 {
998 	enum punit_power_well power_well_id = power_well->id;
999 	u32 mask;
1000 	u32 state;
1001 	u32 ctrl;
1002 
1003 	mask = PUNIT_PWRGT_MASK(power_well_id);
1004 	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
1005 			 PUNIT_PWRGT_PWR_GATE(power_well_id);
1006 
1007 	mutex_lock(&dev_priv->rps.hw_lock);
1008 
1009 #define COND \
1010 	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1011 
1012 	if (COND)
1013 		goto out;
1014 
1015 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1016 	ctrl &= ~mask;
1017 	ctrl |= state;
1018 	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1019 
1020 	if (wait_for(COND, 100))
1021 		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1022 			  state,
1023 			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1024 
1025 #undef COND
1026 
1027 out:
1028 	mutex_unlock(&dev_priv->rps.hw_lock);
1029 }
1030 
1031 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1032 				  struct i915_power_well *power_well)
1033 {
1034 	vlv_set_power_well(dev_priv, power_well, true);
1035 }
1036 
1037 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1038 				   struct i915_power_well *power_well)
1039 {
1040 	vlv_set_power_well(dev_priv, power_well, false);
1041 }
1042 
1043 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1044 				   struct i915_power_well *power_well)
1045 {
1046 	int power_well_id = power_well->id;
1047 	bool enabled = false;
1048 	u32 mask;
1049 	u32 state;
1050 	u32 ctrl;
1051 
1052 	mask = PUNIT_PWRGT_MASK(power_well_id);
1053 	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
1054 
1055 	mutex_lock(&dev_priv->rps.hw_lock);
1056 
1057 	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1058 	/*
1059 	 * We only ever set the power-on and power-gate states, anything
1060 	 * else is unexpected.
1061 	 */
1062 	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
1063 		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
1064 	if (state == ctrl)
1065 		enabled = true;
1066 
1067 	/*
1068 	 * A transient state at this point would mean some unexpected party
1069 	 * is poking at the power controls too.
1070 	 */
1071 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1072 	WARN_ON(ctrl != state);
1073 
1074 	mutex_unlock(&dev_priv->rps.hw_lock);
1075 
1076 	return enabled;
1077 }
1078 
1079 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1080 {
1081 	u32 val;
1082 
1083 	/*
1084 	 * On driver load, a pipe may be active and driving a DSI display.
1085 	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1086 	 * (and never recovering) in this case. intel_dsi_post_disable() will
1087 	 * clear it when we turn off the display.
1088 	 */
1089 	val = I915_READ(DSPCLK_GATE_D);
1090 	val &= DPOUNIT_CLOCK_GATE_DISABLE;
1091 	val |= VRHUNIT_CLOCK_GATE_DISABLE;
1092 	I915_WRITE(DSPCLK_GATE_D, val);
1093 
1094 	/*
1095 	 * Disable trickle feed and enable pnd deadline calculation
1096 	 */
1097 	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1098 	I915_WRITE(CBR1_VLV, 0);
1099 
1100 	WARN_ON(dev_priv->rawclk_freq == 0);
1101 
1102 	I915_WRITE(RAWCLK_FREQ_VLV,
1103 		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
1104 }
1105 
1106 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1107 {
1108 	struct intel_encoder *encoder;
1109 	enum pipe pipe;
1110 
1111 	/*
1112 	 * Enable the CRI clock source so we can get at the
1113 	 * display and the reference clock for VGA
1114 	 * hotplug / manual detection. Supposedly DSI also
1115 	 * needs the ref clock up and running.
1116 	 *
1117 	 * CHV DPLL B/C have some issues if VGA mode is enabled.
1118 	 */
1119 	for_each_pipe(dev_priv, pipe) {
1120 		u32 val = I915_READ(DPLL(pipe));
1121 
1122 		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1123 		if (pipe != PIPE_A)
1124 			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1125 
1126 		I915_WRITE(DPLL(pipe), val);
1127 	}
1128 
1129 	vlv_init_display_clock_gating(dev_priv);
1130 
1131 	spin_lock_irq(&dev_priv->irq_lock);
1132 	valleyview_enable_display_irqs(dev_priv);
1133 	spin_unlock_irq(&dev_priv->irq_lock);
1134 
1135 	/*
1136 	 * During driver initialization/resume we can avoid restoring the
1137 	 * part of the HW/SW state that will be inited anyway explicitly.
1138 	 */
1139 	if (dev_priv->power_domains.initializing)
1140 		return;
1141 
1142 	intel_hpd_init(dev_priv);
1143 
1144 	/* Re-enable the ADPA, if we have one */
1145 	for_each_intel_encoder(&dev_priv->drm, encoder) {
1146 		if (encoder->type == INTEL_OUTPUT_ANALOG)
1147 			intel_crt_reset(&encoder->base);
1148 	}
1149 
1150 	i915_redisable_vga_power_on(dev_priv);
1151 
1152 	intel_pps_unlock_regs_wa(dev_priv);
1153 }
1154 
1155 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1156 {
1157 	spin_lock_irq(&dev_priv->irq_lock);
1158 	valleyview_disable_display_irqs(dev_priv);
1159 	spin_unlock_irq(&dev_priv->irq_lock);
1160 
1161 	/* make sure we're done processing display irqs */
1162 	synchronize_irq(dev_priv->drm.irq);
1163 
1164 	intel_power_sequencer_reset(dev_priv);
1165 
1166 	/* Prevent us from re-enabling polling on accident in late suspend */
1167 	if (!dev_priv->drm.dev->power.is_suspended)
1168 		intel_hpd_poll_init(dev_priv);
1169 }
1170 
1171 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1172 					  struct i915_power_well *power_well)
1173 {
1174 	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
1175 
1176 	vlv_set_power_well(dev_priv, power_well, true);
1177 
1178 	vlv_display_power_well_init(dev_priv);
1179 }
1180 
1181 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1182 					   struct i915_power_well *power_well)
1183 {
1184 	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
1185 
1186 	vlv_display_power_well_deinit(dev_priv);
1187 
1188 	vlv_set_power_well(dev_priv, power_well, false);
1189 }
1190 
1191 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1192 					   struct i915_power_well *power_well)
1193 {
1194 	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
1195 
1196 	/* since ref/cri clock was enabled */
1197 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1198 
1199 	vlv_set_power_well(dev_priv, power_well, true);
1200 
1201 	/*
1202 	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1203 	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
1204 	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
1205 	 *   b.	The other bits such as sfr settings / modesel may all
1206 	 *	be set to 0.
1207 	 *
1208 	 * This should only be done on init and resume from S3 with
1209 	 * both PLLs disabled, or we risk losing DPIO and PLL
1210 	 * synchronization.
1211 	 */
1212 	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1213 }
1214 
1215 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1216 					    struct i915_power_well *power_well)
1217 {
1218 	enum pipe pipe;
1219 
1220 	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
1221 
1222 	for_each_pipe(dev_priv, pipe)
1223 		assert_pll_disabled(dev_priv, pipe);
1224 
1225 	/* Assert common reset */
1226 	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
1227 
1228 	vlv_set_power_well(dev_priv, power_well, false);
1229 }
1230 
1231 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1232 
1233 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1234 						 int power_well_id)
1235 {
1236 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1237 	int i;
1238 
1239 	for (i = 0; i < power_domains->power_well_count; i++) {
1240 		struct i915_power_well *power_well;
1241 
1242 		power_well = &power_domains->power_wells[i];
1243 		if (power_well->id == power_well_id)
1244 			return power_well;
1245 	}
1246 
1247 	return NULL;
1248 }
1249 
1250 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1251 
1252 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1253 {
1254 	struct i915_power_well *cmn_bc =
1255 		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
1256 	struct i915_power_well *cmn_d =
1257 		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
1258 	u32 phy_control = dev_priv->chv_phy_control;
1259 	u32 phy_status = 0;
1260 	u32 phy_status_mask = 0xffffffff;
1261 
1262 	/*
1263 	 * The BIOS can leave the PHY is some weird state
1264 	 * where it doesn't fully power down some parts.
1265 	 * Disable the asserts until the PHY has been fully
1266 	 * reset (ie. the power well has been disabled at
1267 	 * least once).
1268 	 */
1269 	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1270 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1271 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1272 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1273 				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1274 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1275 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1276 
1277 	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1278 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1279 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1280 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1281 
1282 	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
1283 		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1284 
1285 		/* this assumes override is only used to enable lanes */
1286 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1287 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1288 
1289 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1290 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1291 
1292 		/* CL1 is on whenever anything is on in either channel */
1293 		if (BITS_SET(phy_control,
1294 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1295 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1296 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1297 
1298 		/*
1299 		 * The DPLLB check accounts for the pipe B + port A usage
1300 		 * with CL2 powered up but all the lanes in the second channel
1301 		 * powered down.
1302 		 */
1303 		if (BITS_SET(phy_control,
1304 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1305 		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1306 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1307 
1308 		if (BITS_SET(phy_control,
1309 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1310 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1311 		if (BITS_SET(phy_control,
1312 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1313 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1314 
1315 		if (BITS_SET(phy_control,
1316 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1317 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1318 		if (BITS_SET(phy_control,
1319 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1320 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1321 	}
1322 
1323 	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
1324 		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1325 
1326 		/* this assumes override is only used to enable lanes */
1327 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1328 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1329 
1330 		if (BITS_SET(phy_control,
1331 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1332 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1333 
1334 		if (BITS_SET(phy_control,
1335 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1336 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1337 		if (BITS_SET(phy_control,
1338 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1339 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1340 	}
1341 
1342 	phy_status &= phy_status_mask;
1343 
1344 	/*
1345 	 * The PHY may be busy with some initial calibration and whatnot,
1346 	 * so the power state can take a while to actually change.
1347 	 */
1348 	if (intel_wait_for_register(dev_priv,
1349 				    DISPLAY_PHY_STATUS,
1350 				    phy_status_mask,
1351 				    phy_status,
1352 				    10))
1353 		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1354 			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1355 			   phy_status, dev_priv->chv_phy_control);
1356 }
1357 
1358 #undef BITS_SET
1359 
1360 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1361 					   struct i915_power_well *power_well)
1362 {
1363 	enum dpio_phy phy;
1364 	enum pipe pipe;
1365 	uint32_t tmp;
1366 
1367 	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1368 		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
1369 
1370 	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1371 		pipe = PIPE_A;
1372 		phy = DPIO_PHY0;
1373 	} else {
1374 		pipe = PIPE_C;
1375 		phy = DPIO_PHY1;
1376 	}
1377 
1378 	/* since ref/cri clock was enabled */
1379 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1380 	vlv_set_power_well(dev_priv, power_well, true);
1381 
1382 	/* Poll for phypwrgood signal */
1383 	if (intel_wait_for_register(dev_priv,
1384 				    DISPLAY_PHY_STATUS,
1385 				    PHY_POWERGOOD(phy),
1386 				    PHY_POWERGOOD(phy),
1387 				    1))
1388 		DRM_ERROR("Display PHY %d is not power up\n", phy);
1389 
1390 	mutex_lock(&dev_priv->sb_lock);
1391 
1392 	/* Enable dynamic power down */
1393 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1394 	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1395 		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1396 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1397 
1398 	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1399 		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1400 		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1401 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1402 	} else {
1403 		/*
1404 		 * Force the non-existing CL2 off. BXT does this
1405 		 * too, so maybe it saves some power even though
1406 		 * CL2 doesn't exist?
1407 		 */
1408 		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1409 		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1410 		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1411 	}
1412 
1413 	mutex_unlock(&dev_priv->sb_lock);
1414 
1415 	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1416 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1417 
1418 	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1419 		      phy, dev_priv->chv_phy_control);
1420 
1421 	assert_chv_phy_status(dev_priv);
1422 }
1423 
1424 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1425 					    struct i915_power_well *power_well)
1426 {
1427 	enum dpio_phy phy;
1428 
1429 	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1430 		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
1431 
1432 	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1433 		phy = DPIO_PHY0;
1434 		assert_pll_disabled(dev_priv, PIPE_A);
1435 		assert_pll_disabled(dev_priv, PIPE_B);
1436 	} else {
1437 		phy = DPIO_PHY1;
1438 		assert_pll_disabled(dev_priv, PIPE_C);
1439 	}
1440 
1441 	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1442 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1443 
1444 	vlv_set_power_well(dev_priv, power_well, false);
1445 
1446 	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1447 		      phy, dev_priv->chv_phy_control);
1448 
1449 	/* PHY is fully reset now, so we can enable the PHY state asserts */
1450 	dev_priv->chv_phy_assert[phy] = true;
1451 
1452 	assert_chv_phy_status(dev_priv);
1453 }
1454 
1455 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1456 				     enum dpio_channel ch, bool override, unsigned int mask)
1457 {
1458 	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1459 	u32 reg, val, expected, actual;
1460 
1461 	/*
1462 	 * The BIOS can leave the PHY is some weird state
1463 	 * where it doesn't fully power down some parts.
1464 	 * Disable the asserts until the PHY has been fully
1465 	 * reset (ie. the power well has been disabled at
1466 	 * least once).
1467 	 */
1468 	if (!dev_priv->chv_phy_assert[phy])
1469 		return;
1470 
1471 	if (ch == DPIO_CH0)
1472 		reg = _CHV_CMN_DW0_CH0;
1473 	else
1474 		reg = _CHV_CMN_DW6_CH1;
1475 
1476 	mutex_lock(&dev_priv->sb_lock);
1477 	val = vlv_dpio_read(dev_priv, pipe, reg);
1478 	mutex_unlock(&dev_priv->sb_lock);
1479 
1480 	/*
1481 	 * This assumes !override is only used when the port is disabled.
1482 	 * All lanes should power down even without the override when
1483 	 * the port is disabled.
1484 	 */
1485 	if (!override || mask == 0xf) {
1486 		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1487 		/*
1488 		 * If CH1 common lane is not active anymore
1489 		 * (eg. for pipe B DPLL) the entire channel will
1490 		 * shut down, which causes the common lane registers
1491 		 * to read as 0. That means we can't actually check
1492 		 * the lane power down status bits, but as the entire
1493 		 * register reads as 0 it's a good indication that the
1494 		 * channel is indeed entirely powered down.
1495 		 */
1496 		if (ch == DPIO_CH1 && val == 0)
1497 			expected = 0;
1498 	} else if (mask != 0x0) {
1499 		expected = DPIO_ANYDL_POWERDOWN;
1500 	} else {
1501 		expected = 0;
1502 	}
1503 
1504 	if (ch == DPIO_CH0)
1505 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1506 	else
1507 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1508 	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1509 
1510 	WARN(actual != expected,
1511 	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1512 	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1513 	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1514 	     reg, val);
1515 }
1516 
1517 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1518 			  enum dpio_channel ch, bool override)
1519 {
1520 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1521 	bool was_override;
1522 
1523 	mutex_lock(&power_domains->lock);
1524 
1525 	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1526 
1527 	if (override == was_override)
1528 		goto out;
1529 
1530 	if (override)
1531 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1532 	else
1533 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1534 
1535 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1536 
1537 	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1538 		      phy, ch, dev_priv->chv_phy_control);
1539 
1540 	assert_chv_phy_status(dev_priv);
1541 
1542 out:
1543 	mutex_unlock(&power_domains->lock);
1544 
1545 	return was_override;
1546 }
1547 
1548 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1549 			     bool override, unsigned int mask)
1550 {
1551 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1552 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1553 	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1554 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1555 
1556 	mutex_lock(&power_domains->lock);
1557 
1558 	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1559 	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1560 
1561 	if (override)
1562 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1563 	else
1564 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1565 
1566 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1567 
1568 	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1569 		      phy, ch, mask, dev_priv->chv_phy_control);
1570 
1571 	assert_chv_phy_status(dev_priv);
1572 
1573 	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1574 
1575 	mutex_unlock(&power_domains->lock);
1576 }
1577 
1578 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1579 					struct i915_power_well *power_well)
1580 {
1581 	enum pipe pipe = power_well->id;
1582 	bool enabled;
1583 	u32 state, ctrl;
1584 
1585 	mutex_lock(&dev_priv->rps.hw_lock);
1586 
1587 	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
1588 	/*
1589 	 * We only ever set the power-on and power-gate states, anything
1590 	 * else is unexpected.
1591 	 */
1592 	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1593 	enabled = state == DP_SSS_PWR_ON(pipe);
1594 
1595 	/*
1596 	 * A transient state at this point would mean some unexpected party
1597 	 * is poking at the power controls too.
1598 	 */
1599 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
1600 	WARN_ON(ctrl << 16 != state);
1601 
1602 	mutex_unlock(&dev_priv->rps.hw_lock);
1603 
1604 	return enabled;
1605 }
1606 
1607 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1608 				    struct i915_power_well *power_well,
1609 				    bool enable)
1610 {
1611 	enum pipe pipe = power_well->id;
1612 	u32 state;
1613 	u32 ctrl;
1614 
1615 	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1616 
1617 	mutex_lock(&dev_priv->rps.hw_lock);
1618 
1619 #define COND \
1620 	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
1621 
1622 	if (COND)
1623 		goto out;
1624 
1625 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
1626 	ctrl &= ~DP_SSC_MASK(pipe);
1627 	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1628 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
1629 
1630 	if (wait_for(COND, 100))
1631 		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1632 			  state,
1633 			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
1634 
1635 #undef COND
1636 
1637 out:
1638 	mutex_unlock(&dev_priv->rps.hw_lock);
1639 }
1640 
1641 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1642 				       struct i915_power_well *power_well)
1643 {
1644 	WARN_ON_ONCE(power_well->id != PIPE_A);
1645 
1646 	chv_set_pipe_power_well(dev_priv, power_well, true);
1647 
1648 	vlv_display_power_well_init(dev_priv);
1649 }
1650 
1651 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1652 					struct i915_power_well *power_well)
1653 {
1654 	WARN_ON_ONCE(power_well->id != PIPE_A);
1655 
1656 	vlv_display_power_well_deinit(dev_priv);
1657 
1658 	chv_set_pipe_power_well(dev_priv, power_well, false);
1659 }
1660 
1661 static void
1662 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1663 				 enum intel_display_power_domain domain)
1664 {
1665 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1666 	struct i915_power_well *power_well;
1667 
1668 	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1669 		intel_power_well_get(dev_priv, power_well);
1670 
1671 	power_domains->domain_use_count[domain]++;
1672 }
1673 
1674 /**
1675  * intel_display_power_get - grab a power domain reference
1676  * @dev_priv: i915 device instance
1677  * @domain: power domain to reference
1678  *
1679  * This function grabs a power domain reference for @domain and ensures that the
1680  * power domain and all its parents are powered up. Therefore users should only
1681  * grab a reference to the innermost power domain they need.
1682  *
1683  * Any power domain reference obtained by this function must have a symmetric
1684  * call to intel_display_power_put() to release the reference again.
1685  */
1686 void intel_display_power_get(struct drm_i915_private *dev_priv,
1687 			     enum intel_display_power_domain domain)
1688 {
1689 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1690 
1691 	intel_runtime_pm_get(dev_priv);
1692 
1693 	mutex_lock(&power_domains->lock);
1694 
1695 	__intel_display_power_get_domain(dev_priv, domain);
1696 
1697 	mutex_unlock(&power_domains->lock);
1698 }
1699 
1700 /**
1701  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1702  * @dev_priv: i915 device instance
1703  * @domain: power domain to reference
1704  *
1705  * This function grabs a power domain reference for @domain and ensures that the
1706  * power domain and all its parents are powered up. Therefore users should only
1707  * grab a reference to the innermost power domain they need.
1708  *
1709  * Any power domain reference obtained by this function must have a symmetric
1710  * call to intel_display_power_put() to release the reference again.
1711  */
1712 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1713 					enum intel_display_power_domain domain)
1714 {
1715 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1716 	bool is_enabled;
1717 
1718 	if (!intel_runtime_pm_get_if_in_use(dev_priv))
1719 		return false;
1720 
1721 	mutex_lock(&power_domains->lock);
1722 
1723 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1724 		__intel_display_power_get_domain(dev_priv, domain);
1725 		is_enabled = true;
1726 	} else {
1727 		is_enabled = false;
1728 	}
1729 
1730 	mutex_unlock(&power_domains->lock);
1731 
1732 	if (!is_enabled)
1733 		intel_runtime_pm_put(dev_priv);
1734 
1735 	return is_enabled;
1736 }
1737 
1738 /**
1739  * intel_display_power_put - release a power domain reference
1740  * @dev_priv: i915 device instance
1741  * @domain: power domain to reference
1742  *
1743  * This function drops the power domain reference obtained by
1744  * intel_display_power_get() and might power down the corresponding hardware
1745  * block right away if this is the last reference.
1746  */
1747 void intel_display_power_put(struct drm_i915_private *dev_priv,
1748 			     enum intel_display_power_domain domain)
1749 {
1750 	struct i915_power_domains *power_domains;
1751 	struct i915_power_well *power_well;
1752 
1753 	power_domains = &dev_priv->power_domains;
1754 
1755 	mutex_lock(&power_domains->lock);
1756 
1757 	WARN(!power_domains->domain_use_count[domain],
1758 	     "Use count on domain %s is already zero\n",
1759 	     intel_display_power_domain_str(domain));
1760 	power_domains->domain_use_count[domain]--;
1761 
1762 	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
1763 		intel_power_well_put(dev_priv, power_well);
1764 
1765 	mutex_unlock(&power_domains->lock);
1766 
1767 	intel_runtime_pm_put(dev_priv);
1768 }
1769 
1770 #define HSW_DISPLAY_POWER_DOMAINS (			\
1771 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1772 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1773 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
1774 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1775 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1776 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1777 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1778 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1779 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1780 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1781 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1782 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
1783 	BIT_ULL(POWER_DOMAIN_VGA) |				\
1784 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1785 	BIT_ULL(POWER_DOMAIN_INIT))
1786 
1787 #define BDW_DISPLAY_POWER_DOMAINS (			\
1788 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1789 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1790 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1791 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1792 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1793 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1794 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1795 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1796 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1797 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1798 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
1799 	BIT_ULL(POWER_DOMAIN_VGA) |				\
1800 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1801 	BIT_ULL(POWER_DOMAIN_INIT))
1802 
1803 #define VLV_DISPLAY_POWER_DOMAINS (		\
1804 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
1805 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
1806 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1807 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1808 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
1809 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
1810 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1811 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1812 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
1813 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
1814 	BIT_ULL(POWER_DOMAIN_VGA) |			\
1815 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
1816 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1817 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1818 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
1819 	BIT_ULL(POWER_DOMAIN_INIT))
1820 
1821 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
1822 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1823 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1824 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
1825 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1826 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1827 	BIT_ULL(POWER_DOMAIN_INIT))
1828 
1829 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
1830 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1831 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1832 	BIT_ULL(POWER_DOMAIN_INIT))
1833 
1834 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
1835 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1836 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1837 	BIT_ULL(POWER_DOMAIN_INIT))
1838 
1839 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
1840 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1841 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1842 	BIT_ULL(POWER_DOMAIN_INIT))
1843 
1844 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
1845 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1846 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1847 	BIT_ULL(POWER_DOMAIN_INIT))
1848 
1849 #define CHV_DISPLAY_POWER_DOMAINS (		\
1850 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
1851 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
1852 	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
1853 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1854 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1855 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
1856 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
1857 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
1858 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
1859 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1860 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1861 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
1862 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
1863 	BIT_ULL(POWER_DOMAIN_VGA) |			\
1864 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
1865 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1866 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1867 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
1868 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
1869 	BIT_ULL(POWER_DOMAIN_INIT))
1870 
1871 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
1872 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1873 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1874 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1875 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1876 	BIT_ULL(POWER_DOMAIN_INIT))
1877 
1878 #define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
1879 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
1880 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
1881 	BIT_ULL(POWER_DOMAIN_INIT))
1882 
1883 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
1884 	.sync_hw = i9xx_power_well_sync_hw_noop,
1885 	.enable = i9xx_always_on_power_well_noop,
1886 	.disable = i9xx_always_on_power_well_noop,
1887 	.is_enabled = i9xx_always_on_power_well_enabled,
1888 };
1889 
1890 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
1891 	.sync_hw = i9xx_power_well_sync_hw_noop,
1892 	.enable = chv_pipe_power_well_enable,
1893 	.disable = chv_pipe_power_well_disable,
1894 	.is_enabled = chv_pipe_power_well_enabled,
1895 };
1896 
1897 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
1898 	.sync_hw = i9xx_power_well_sync_hw_noop,
1899 	.enable = chv_dpio_cmn_power_well_enable,
1900 	.disable = chv_dpio_cmn_power_well_disable,
1901 	.is_enabled = vlv_power_well_enabled,
1902 };
1903 
1904 static struct i915_power_well i9xx_always_on_power_well[] = {
1905 	{
1906 		.name = "always-on",
1907 		.always_on = 1,
1908 		.domains = POWER_DOMAIN_MASK,
1909 		.ops = &i9xx_always_on_power_well_ops,
1910 	},
1911 };
1912 
1913 static const struct i915_power_well_ops hsw_power_well_ops = {
1914 	.sync_hw = hsw_power_well_sync_hw,
1915 	.enable = hsw_power_well_enable,
1916 	.disable = hsw_power_well_disable,
1917 	.is_enabled = hsw_power_well_enabled,
1918 };
1919 
1920 static const struct i915_power_well_ops skl_power_well_ops = {
1921 	.sync_hw = skl_power_well_sync_hw,
1922 	.enable = skl_power_well_enable,
1923 	.disable = skl_power_well_disable,
1924 	.is_enabled = skl_power_well_enabled,
1925 };
1926 
1927 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
1928 	.sync_hw = i9xx_power_well_sync_hw_noop,
1929 	.enable = gen9_dc_off_power_well_enable,
1930 	.disable = gen9_dc_off_power_well_disable,
1931 	.is_enabled = gen9_dc_off_power_well_enabled,
1932 };
1933 
1934 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
1935 	.sync_hw = i9xx_power_well_sync_hw_noop,
1936 	.enable = bxt_dpio_cmn_power_well_enable,
1937 	.disable = bxt_dpio_cmn_power_well_disable,
1938 	.is_enabled = bxt_dpio_cmn_power_well_enabled,
1939 };
1940 
1941 static struct i915_power_well hsw_power_wells[] = {
1942 	{
1943 		.name = "always-on",
1944 		.always_on = 1,
1945 		.domains = POWER_DOMAIN_MASK,
1946 		.ops = &i9xx_always_on_power_well_ops,
1947 	},
1948 	{
1949 		.name = "display",
1950 		.domains = HSW_DISPLAY_POWER_DOMAINS,
1951 		.ops = &hsw_power_well_ops,
1952 	},
1953 };
1954 
1955 static struct i915_power_well bdw_power_wells[] = {
1956 	{
1957 		.name = "always-on",
1958 		.always_on = 1,
1959 		.domains = POWER_DOMAIN_MASK,
1960 		.ops = &i9xx_always_on_power_well_ops,
1961 	},
1962 	{
1963 		.name = "display",
1964 		.domains = BDW_DISPLAY_POWER_DOMAINS,
1965 		.ops = &hsw_power_well_ops,
1966 	},
1967 };
1968 
1969 static const struct i915_power_well_ops vlv_display_power_well_ops = {
1970 	.sync_hw = i9xx_power_well_sync_hw_noop,
1971 	.enable = vlv_display_power_well_enable,
1972 	.disable = vlv_display_power_well_disable,
1973 	.is_enabled = vlv_power_well_enabled,
1974 };
1975 
1976 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
1977 	.sync_hw = i9xx_power_well_sync_hw_noop,
1978 	.enable = vlv_dpio_cmn_power_well_enable,
1979 	.disable = vlv_dpio_cmn_power_well_disable,
1980 	.is_enabled = vlv_power_well_enabled,
1981 };
1982 
1983 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
1984 	.sync_hw = i9xx_power_well_sync_hw_noop,
1985 	.enable = vlv_power_well_enable,
1986 	.disable = vlv_power_well_disable,
1987 	.is_enabled = vlv_power_well_enabled,
1988 };
1989 
1990 static struct i915_power_well vlv_power_wells[] = {
1991 	{
1992 		.name = "always-on",
1993 		.always_on = 1,
1994 		.domains = POWER_DOMAIN_MASK,
1995 		.ops = &i9xx_always_on_power_well_ops,
1996 		.id = PUNIT_POWER_WELL_ALWAYS_ON,
1997 	},
1998 	{
1999 		.name = "display",
2000 		.domains = VLV_DISPLAY_POWER_DOMAINS,
2001 		.id = PUNIT_POWER_WELL_DISP2D,
2002 		.ops = &vlv_display_power_well_ops,
2003 	},
2004 	{
2005 		.name = "dpio-tx-b-01",
2006 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2007 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2008 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2009 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2010 		.ops = &vlv_dpio_power_well_ops,
2011 		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
2012 	},
2013 	{
2014 		.name = "dpio-tx-b-23",
2015 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2016 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2017 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2018 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2019 		.ops = &vlv_dpio_power_well_ops,
2020 		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
2021 	},
2022 	{
2023 		.name = "dpio-tx-c-01",
2024 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2025 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2026 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2027 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2028 		.ops = &vlv_dpio_power_well_ops,
2029 		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
2030 	},
2031 	{
2032 		.name = "dpio-tx-c-23",
2033 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2034 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2035 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2036 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2037 		.ops = &vlv_dpio_power_well_ops,
2038 		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
2039 	},
2040 	{
2041 		.name = "dpio-common",
2042 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2043 		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2044 		.ops = &vlv_dpio_cmn_power_well_ops,
2045 	},
2046 };
2047 
2048 static struct i915_power_well chv_power_wells[] = {
2049 	{
2050 		.name = "always-on",
2051 		.always_on = 1,
2052 		.domains = POWER_DOMAIN_MASK,
2053 		.ops = &i9xx_always_on_power_well_ops,
2054 	},
2055 	{
2056 		.name = "display",
2057 		/*
2058 		 * Pipe A power well is the new disp2d well. Pipe B and C
2059 		 * power wells don't actually exist. Pipe A power well is
2060 		 * required for any pipe to work.
2061 		 */
2062 		.domains = CHV_DISPLAY_POWER_DOMAINS,
2063 		.id = PIPE_A,
2064 		.ops = &chv_pipe_power_well_ops,
2065 	},
2066 	{
2067 		.name = "dpio-common-bc",
2068 		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2069 		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2070 		.ops = &chv_dpio_cmn_power_well_ops,
2071 	},
2072 	{
2073 		.name = "dpio-common-d",
2074 		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2075 		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
2076 		.ops = &chv_dpio_cmn_power_well_ops,
2077 	},
2078 };
2079 
2080 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2081 				    int power_well_id)
2082 {
2083 	struct i915_power_well *power_well;
2084 	bool ret;
2085 
2086 	power_well = lookup_power_well(dev_priv, power_well_id);
2087 	ret = power_well->ops->is_enabled(dev_priv, power_well);
2088 
2089 	return ret;
2090 }
2091 
2092 static struct i915_power_well skl_power_wells[] = {
2093 	{
2094 		.name = "always-on",
2095 		.always_on = 1,
2096 		.domains = POWER_DOMAIN_MASK,
2097 		.ops = &i9xx_always_on_power_well_ops,
2098 		.id = SKL_DISP_PW_ALWAYS_ON,
2099 	},
2100 	{
2101 		.name = "power well 1",
2102 		/* Handled by the DMC firmware */
2103 		.domains = 0,
2104 		.ops = &skl_power_well_ops,
2105 		.id = SKL_DISP_PW_1,
2106 	},
2107 	{
2108 		.name = "MISC IO power well",
2109 		/* Handled by the DMC firmware */
2110 		.domains = 0,
2111 		.ops = &skl_power_well_ops,
2112 		.id = SKL_DISP_PW_MISC_IO,
2113 	},
2114 	{
2115 		.name = "DC off",
2116 		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2117 		.ops = &gen9_dc_off_power_well_ops,
2118 		.id = SKL_DISP_PW_DC_OFF,
2119 	},
2120 	{
2121 		.name = "power well 2",
2122 		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2123 		.ops = &skl_power_well_ops,
2124 		.id = SKL_DISP_PW_2,
2125 	},
2126 	{
2127 		.name = "DDI A/E IO power well",
2128 		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
2129 		.ops = &skl_power_well_ops,
2130 		.id = SKL_DISP_PW_DDI_A_E,
2131 	},
2132 	{
2133 		.name = "DDI B IO power well",
2134 		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2135 		.ops = &skl_power_well_ops,
2136 		.id = SKL_DISP_PW_DDI_B,
2137 	},
2138 	{
2139 		.name = "DDI C IO power well",
2140 		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2141 		.ops = &skl_power_well_ops,
2142 		.id = SKL_DISP_PW_DDI_C,
2143 	},
2144 	{
2145 		.name = "DDI D IO power well",
2146 		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
2147 		.ops = &skl_power_well_ops,
2148 		.id = SKL_DISP_PW_DDI_D,
2149 	},
2150 };
2151 
2152 static struct i915_power_well bxt_power_wells[] = {
2153 	{
2154 		.name = "always-on",
2155 		.always_on = 1,
2156 		.domains = POWER_DOMAIN_MASK,
2157 		.ops = &i9xx_always_on_power_well_ops,
2158 	},
2159 	{
2160 		.name = "power well 1",
2161 		.domains = 0,
2162 		.ops = &skl_power_well_ops,
2163 		.id = SKL_DISP_PW_1,
2164 	},
2165 	{
2166 		.name = "DC off",
2167 		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2168 		.ops = &gen9_dc_off_power_well_ops,
2169 		.id = SKL_DISP_PW_DC_OFF,
2170 	},
2171 	{
2172 		.name = "power well 2",
2173 		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2174 		.ops = &skl_power_well_ops,
2175 		.id = SKL_DISP_PW_2,
2176 	},
2177 	{
2178 		.name = "dpio-common-a",
2179 		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2180 		.ops = &bxt_dpio_cmn_power_well_ops,
2181 		.id = BXT_DPIO_CMN_A,
2182 		.data = DPIO_PHY1,
2183 	},
2184 	{
2185 		.name = "dpio-common-bc",
2186 		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2187 		.ops = &bxt_dpio_cmn_power_well_ops,
2188 		.id = BXT_DPIO_CMN_BC,
2189 		.data = DPIO_PHY0,
2190 	},
2191 };
2192 
2193 static struct i915_power_well glk_power_wells[] = {
2194 	{
2195 		.name = "always-on",
2196 		.always_on = 1,
2197 		.domains = POWER_DOMAIN_MASK,
2198 		.ops = &i9xx_always_on_power_well_ops,
2199 	},
2200 	{
2201 		.name = "power well 1",
2202 		/* Handled by the DMC firmware */
2203 		.domains = 0,
2204 		.ops = &skl_power_well_ops,
2205 		.id = SKL_DISP_PW_1,
2206 	},
2207 	{
2208 		.name = "DC off",
2209 		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2210 		.ops = &gen9_dc_off_power_well_ops,
2211 		.id = SKL_DISP_PW_DC_OFF,
2212 	},
2213 	{
2214 		.name = "power well 2",
2215 		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2216 		.ops = &skl_power_well_ops,
2217 		.id = SKL_DISP_PW_2,
2218 	},
2219 	{
2220 		.name = "dpio-common-a",
2221 		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2222 		.ops = &bxt_dpio_cmn_power_well_ops,
2223 		.id = BXT_DPIO_CMN_A,
2224 		.data = DPIO_PHY1,
2225 	},
2226 	{
2227 		.name = "dpio-common-b",
2228 		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2229 		.ops = &bxt_dpio_cmn_power_well_ops,
2230 		.id = BXT_DPIO_CMN_BC,
2231 		.data = DPIO_PHY0,
2232 	},
2233 	{
2234 		.name = "dpio-common-c",
2235 		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2236 		.ops = &bxt_dpio_cmn_power_well_ops,
2237 		.id = GLK_DPIO_CMN_C,
2238 		.data = DPIO_PHY2,
2239 	},
2240 	{
2241 		.name = "AUX A",
2242 		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
2243 		.ops = &skl_power_well_ops,
2244 		.id = GLK_DISP_PW_AUX_A,
2245 	},
2246 	{
2247 		.name = "AUX B",
2248 		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
2249 		.ops = &skl_power_well_ops,
2250 		.id = GLK_DISP_PW_AUX_B,
2251 	},
2252 	{
2253 		.name = "AUX C",
2254 		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
2255 		.ops = &skl_power_well_ops,
2256 		.id = GLK_DISP_PW_AUX_C,
2257 	},
2258 	{
2259 		.name = "DDI A IO power well",
2260 		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
2261 		.ops = &skl_power_well_ops,
2262 		.id = GLK_DISP_PW_DDI_A,
2263 	},
2264 	{
2265 		.name = "DDI B IO power well",
2266 		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2267 		.ops = &skl_power_well_ops,
2268 		.id = SKL_DISP_PW_DDI_B,
2269 	},
2270 	{
2271 		.name = "DDI C IO power well",
2272 		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2273 		.ops = &skl_power_well_ops,
2274 		.id = SKL_DISP_PW_DDI_C,
2275 	},
2276 };
2277 
2278 static int
2279 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
2280 				   int disable_power_well)
2281 {
2282 	if (disable_power_well >= 0)
2283 		return !!disable_power_well;
2284 
2285 	return 1;
2286 }
2287 
2288 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2289 				    int enable_dc)
2290 {
2291 	uint32_t mask;
2292 	int requested_dc;
2293 	int max_dc;
2294 
2295 	if (IS_GEN9_BC(dev_priv)) {
2296 		max_dc = 2;
2297 		mask = 0;
2298 	} else if (IS_GEN9_LP(dev_priv)) {
2299 		max_dc = 1;
2300 		/*
2301 		 * DC9 has a separate HW flow from the rest of the DC states,
2302 		 * not depending on the DMC firmware. It's needed by system
2303 		 * suspend/resume, so allow it unconditionally.
2304 		 */
2305 		mask = DC_STATE_EN_DC9;
2306 	} else {
2307 		max_dc = 0;
2308 		mask = 0;
2309 	}
2310 
2311 	if (!i915.disable_power_well)
2312 		max_dc = 0;
2313 
2314 	if (enable_dc >= 0 && enable_dc <= max_dc) {
2315 		requested_dc = enable_dc;
2316 	} else if (enable_dc == -1) {
2317 		requested_dc = max_dc;
2318 	} else if (enable_dc > max_dc && enable_dc <= 2) {
2319 		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2320 			      enable_dc, max_dc);
2321 		requested_dc = max_dc;
2322 	} else {
2323 		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2324 		requested_dc = max_dc;
2325 	}
2326 
2327 	if (requested_dc > 1)
2328 		mask |= DC_STATE_EN_UPTO_DC6;
2329 	if (requested_dc > 0)
2330 		mask |= DC_STATE_EN_UPTO_DC5;
2331 
2332 	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2333 
2334 	return mask;
2335 }
2336 
2337 #define set_power_wells(power_domains, __power_wells) ({		\
2338 	(power_domains)->power_wells = (__power_wells);			\
2339 	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
2340 })
2341 
2342 /**
2343  * intel_power_domains_init - initializes the power domain structures
2344  * @dev_priv: i915 device instance
2345  *
2346  * Initializes the power domain structures for @dev_priv depending upon the
2347  * supported platform.
2348  */
2349 int intel_power_domains_init(struct drm_i915_private *dev_priv)
2350 {
2351 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2352 
2353 	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
2354 						     i915.disable_power_well);
2355 	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
2356 							    i915.enable_dc);
2357 
2358 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
2359 
2360 	mutex_init(&power_domains->lock);
2361 
2362 	/*
2363 	 * The enabling order will be from lower to higher indexed wells,
2364 	 * the disabling order is reversed.
2365 	 */
2366 	if (IS_HASWELL(dev_priv)) {
2367 		set_power_wells(power_domains, hsw_power_wells);
2368 	} else if (IS_BROADWELL(dev_priv)) {
2369 		set_power_wells(power_domains, bdw_power_wells);
2370 	} else if (IS_GEN9_BC(dev_priv)) {
2371 		set_power_wells(power_domains, skl_power_wells);
2372 	} else if (IS_BROXTON(dev_priv)) {
2373 		set_power_wells(power_domains, bxt_power_wells);
2374 	} else if (IS_GEMINILAKE(dev_priv)) {
2375 		set_power_wells(power_domains, glk_power_wells);
2376 	} else if (IS_CHERRYVIEW(dev_priv)) {
2377 		set_power_wells(power_domains, chv_power_wells);
2378 	} else if (IS_VALLEYVIEW(dev_priv)) {
2379 		set_power_wells(power_domains, vlv_power_wells);
2380 	} else {
2381 		set_power_wells(power_domains, i9xx_always_on_power_well);
2382 	}
2383 
2384 	return 0;
2385 }
2386 
2387 /**
2388  * intel_power_domains_fini - finalizes the power domain structures
2389  * @dev_priv: i915 device instance
2390  *
2391  * Finalizes the power domain structures for @dev_priv depending upon the
2392  * supported platform. This function also disables runtime pm and ensures that
2393  * the device stays powered up so that the driver can be reloaded.
2394  */
2395 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2396 {
2397 	struct device *kdev = &dev_priv->drm.pdev->dev;
2398 
2399 	/*
2400 	 * The i915.ko module is still not prepared to be loaded when
2401 	 * the power well is not enabled, so just enable it in case
2402 	 * we're going to unload/reload.
2403 	 * The following also reacquires the RPM reference the core passed
2404 	 * to the driver during loading, which is dropped in
2405 	 * intel_runtime_pm_enable(). We have to hand back the control of the
2406 	 * device to the core with this reference held.
2407 	 */
2408 	intel_display_set_init_power(dev_priv, true);
2409 
2410 	/* Remove the refcount we took to keep power well support disabled. */
2411 	if (!i915.disable_power_well)
2412 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2413 
2414 	/*
2415 	 * Remove the refcount we took in intel_runtime_pm_enable() in case
2416 	 * the platform doesn't support runtime PM.
2417 	 */
2418 	if (!HAS_RUNTIME_PM(dev_priv))
2419 		pm_runtime_put(kdev);
2420 }
2421 
2422 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2423 {
2424 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2425 	struct i915_power_well *power_well;
2426 
2427 	mutex_lock(&power_domains->lock);
2428 	for_each_power_well(dev_priv, power_well) {
2429 		power_well->ops->sync_hw(dev_priv, power_well);
2430 		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2431 								     power_well);
2432 	}
2433 	mutex_unlock(&power_domains->lock);
2434 }
2435 
2436 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2437 {
2438 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2439 	POSTING_READ(DBUF_CTL);
2440 
2441 	udelay(10);
2442 
2443 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2444 		DRM_ERROR("DBuf power enable timeout\n");
2445 }
2446 
2447 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2448 {
2449 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2450 	POSTING_READ(DBUF_CTL);
2451 
2452 	udelay(10);
2453 
2454 	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
2455 		DRM_ERROR("DBuf power disable timeout!\n");
2456 }
2457 
2458 static void skl_display_core_init(struct drm_i915_private *dev_priv,
2459 				   bool resume)
2460 {
2461 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2462 	struct i915_power_well *well;
2463 	uint32_t val;
2464 
2465 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2466 
2467 	/* enable PCH reset handshake */
2468 	val = I915_READ(HSW_NDE_RSTWRN_OPT);
2469 	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
2470 
2471 	/* enable PG1 and Misc I/O */
2472 	mutex_lock(&power_domains->lock);
2473 
2474 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2475 	intel_power_well_enable(dev_priv, well);
2476 
2477 	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2478 	intel_power_well_enable(dev_priv, well);
2479 
2480 	mutex_unlock(&power_domains->lock);
2481 
2482 	skl_init_cdclk(dev_priv);
2483 
2484 	gen9_dbuf_enable(dev_priv);
2485 
2486 	if (resume && dev_priv->csr.dmc_payload)
2487 		intel_csr_load_program(dev_priv);
2488 }
2489 
2490 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
2491 {
2492 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2493 	struct i915_power_well *well;
2494 
2495 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2496 
2497 	gen9_dbuf_disable(dev_priv);
2498 
2499 	skl_uninit_cdclk(dev_priv);
2500 
2501 	/* The spec doesn't call for removing the reset handshake flag */
2502 	/* disable PG1 and Misc I/O */
2503 
2504 	mutex_lock(&power_domains->lock);
2505 
2506 	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2507 	intel_power_well_disable(dev_priv, well);
2508 
2509 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2510 	intel_power_well_disable(dev_priv, well);
2511 
2512 	mutex_unlock(&power_domains->lock);
2513 }
2514 
2515 void bxt_display_core_init(struct drm_i915_private *dev_priv,
2516 			   bool resume)
2517 {
2518 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2519 	struct i915_power_well *well;
2520 	uint32_t val;
2521 
2522 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2523 
2524 	/*
2525 	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
2526 	 * or else the reset will hang because there is no PCH to respond.
2527 	 * Move the handshake programming to initialization sequence.
2528 	 * Previously was left up to BIOS.
2529 	 */
2530 	val = I915_READ(HSW_NDE_RSTWRN_OPT);
2531 	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
2532 	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2533 
2534 	/* Enable PG1 */
2535 	mutex_lock(&power_domains->lock);
2536 
2537 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2538 	intel_power_well_enable(dev_priv, well);
2539 
2540 	mutex_unlock(&power_domains->lock);
2541 
2542 	bxt_init_cdclk(dev_priv);
2543 
2544 	gen9_dbuf_enable(dev_priv);
2545 
2546 	if (resume && dev_priv->csr.dmc_payload)
2547 		intel_csr_load_program(dev_priv);
2548 }
2549 
2550 void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
2551 {
2552 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2553 	struct i915_power_well *well;
2554 
2555 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2556 
2557 	gen9_dbuf_disable(dev_priv);
2558 
2559 	bxt_uninit_cdclk(dev_priv);
2560 
2561 	/* The spec doesn't call for removing the reset handshake flag */
2562 
2563 	/* Disable PG1 */
2564 	mutex_lock(&power_domains->lock);
2565 
2566 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2567 	intel_power_well_disable(dev_priv, well);
2568 
2569 	mutex_unlock(&power_domains->lock);
2570 }
2571 
2572 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
2573 {
2574 	struct i915_power_well *cmn_bc =
2575 		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2576 	struct i915_power_well *cmn_d =
2577 		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
2578 
2579 	/*
2580 	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
2581 	 * workaround never ever read DISPLAY_PHY_CONTROL, and
2582 	 * instead maintain a shadow copy ourselves. Use the actual
2583 	 * power well state and lane status to reconstruct the
2584 	 * expected initial value.
2585 	 */
2586 	dev_priv->chv_phy_control =
2587 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
2588 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
2589 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
2590 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
2591 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
2592 
2593 	/*
2594 	 * If all lanes are disabled we leave the override disabled
2595 	 * with all power down bits cleared to match the state we
2596 	 * would use after disabling the port. Otherwise enable the
2597 	 * override and set the lane powerdown bits accding to the
2598 	 * current lane status.
2599 	 */
2600 	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
2601 		uint32_t status = I915_READ(DPLL(PIPE_A));
2602 		unsigned int mask;
2603 
2604 		mask = status & DPLL_PORTB_READY_MASK;
2605 		if (mask == 0xf)
2606 			mask = 0x0;
2607 		else
2608 			dev_priv->chv_phy_control |=
2609 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
2610 
2611 		dev_priv->chv_phy_control |=
2612 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
2613 
2614 		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
2615 		if (mask == 0xf)
2616 			mask = 0x0;
2617 		else
2618 			dev_priv->chv_phy_control |=
2619 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
2620 
2621 		dev_priv->chv_phy_control |=
2622 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
2623 
2624 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
2625 
2626 		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
2627 	} else {
2628 		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
2629 	}
2630 
2631 	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
2632 		uint32_t status = I915_READ(DPIO_PHY_STATUS);
2633 		unsigned int mask;
2634 
2635 		mask = status & DPLL_PORTD_READY_MASK;
2636 
2637 		if (mask == 0xf)
2638 			mask = 0x0;
2639 		else
2640 			dev_priv->chv_phy_control |=
2641 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
2642 
2643 		dev_priv->chv_phy_control |=
2644 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
2645 
2646 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
2647 
2648 		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
2649 	} else {
2650 		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
2651 	}
2652 
2653 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
2654 
2655 	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
2656 		      dev_priv->chv_phy_control);
2657 }
2658 
2659 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2660 {
2661 	struct i915_power_well *cmn =
2662 		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2663 	struct i915_power_well *disp2d =
2664 		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
2665 
2666 	/* If the display might be already active skip this */
2667 	if (cmn->ops->is_enabled(dev_priv, cmn) &&
2668 	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
2669 	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
2670 		return;
2671 
2672 	DRM_DEBUG_KMS("toggling display PHY side reset\n");
2673 
2674 	/* cmnlane needs DPLL registers */
2675 	disp2d->ops->enable(dev_priv, disp2d);
2676 
2677 	/*
2678 	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
2679 	 * Need to assert and de-assert PHY SB reset by gating the
2680 	 * common lane power, then un-gating it.
2681 	 * Simply ungating isn't enough to reset the PHY enough to get
2682 	 * ports and lanes running.
2683 	 */
2684 	cmn->ops->disable(dev_priv, cmn);
2685 }
2686 
2687 /**
2688  * intel_power_domains_init_hw - initialize hardware power domain state
2689  * @dev_priv: i915 device instance
2690  * @resume: Called from resume code paths or not
2691  *
2692  * This function initializes the hardware power domain state and enables all
2693  * power wells belonging to the INIT power domain. Power wells in other
2694  * domains (and not in the INIT domain) are referenced or disabled during the
2695  * modeset state HW readout. After that the reference count of each power well
2696  * must match its HW enabled state, see intel_power_domains_verify_state().
2697  */
2698 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2699 {
2700 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2701 
2702 	power_domains->initializing = true;
2703 
2704 	if (IS_GEN9_BC(dev_priv)) {
2705 		skl_display_core_init(dev_priv, resume);
2706 	} else if (IS_GEN9_LP(dev_priv)) {
2707 		bxt_display_core_init(dev_priv, resume);
2708 	} else if (IS_CHERRYVIEW(dev_priv)) {
2709 		mutex_lock(&power_domains->lock);
2710 		chv_phy_control_init(dev_priv);
2711 		mutex_unlock(&power_domains->lock);
2712 	} else if (IS_VALLEYVIEW(dev_priv)) {
2713 		mutex_lock(&power_domains->lock);
2714 		vlv_cmnlane_wa(dev_priv);
2715 		mutex_unlock(&power_domains->lock);
2716 	}
2717 
2718 	/* For now, we need the power well to be always enabled. */
2719 	intel_display_set_init_power(dev_priv, true);
2720 	/* Disable power support if the user asked so. */
2721 	if (!i915.disable_power_well)
2722 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
2723 	intel_power_domains_sync_hw(dev_priv);
2724 	power_domains->initializing = false;
2725 }
2726 
2727 /**
2728  * intel_power_domains_suspend - suspend power domain state
2729  * @dev_priv: i915 device instance
2730  *
2731  * This function prepares the hardware power domain state before entering
2732  * system suspend. It must be paired with intel_power_domains_init_hw().
2733  */
2734 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2735 {
2736 	/*
2737 	 * Even if power well support was disabled we still want to disable
2738 	 * power wells while we are system suspended.
2739 	 */
2740 	if (!i915.disable_power_well)
2741 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2742 
2743 	if (IS_GEN9_BC(dev_priv))
2744 		skl_display_core_uninit(dev_priv);
2745 	else if (IS_GEN9_LP(dev_priv))
2746 		bxt_display_core_uninit(dev_priv);
2747 }
2748 
2749 static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
2750 {
2751 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2752 	struct i915_power_well *power_well;
2753 
2754 	for_each_power_well(dev_priv, power_well) {
2755 		enum intel_display_power_domain domain;
2756 
2757 		DRM_DEBUG_DRIVER("%-25s %d\n",
2758 				 power_well->name, power_well->count);
2759 
2760 		for_each_power_domain(domain, power_well->domains)
2761 			DRM_DEBUG_DRIVER("  %-23s %d\n",
2762 					 intel_display_power_domain_str(domain),
2763 					 power_domains->domain_use_count[domain]);
2764 	}
2765 }
2766 
2767 /**
2768  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
2769  * @dev_priv: i915 device instance
2770  *
2771  * Verify if the reference count of each power well matches its HW enabled
2772  * state and the total refcount of the domains it belongs to. This must be
2773  * called after modeset HW state sanitization, which is responsible for
2774  * acquiring reference counts for any power wells in use and disabling the
2775  * ones left on by BIOS but not required by any active output.
2776  */
2777 void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
2778 {
2779 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2780 	struct i915_power_well *power_well;
2781 	bool dump_domain_info;
2782 
2783 	mutex_lock(&power_domains->lock);
2784 
2785 	dump_domain_info = false;
2786 	for_each_power_well(dev_priv, power_well) {
2787 		enum intel_display_power_domain domain;
2788 		int domains_count;
2789 		bool enabled;
2790 
2791 		/*
2792 		 * Power wells not belonging to any domain (like the MISC_IO
2793 		 * and PW1 power wells) are under FW control, so ignore them,
2794 		 * since their state can change asynchronously.
2795 		 */
2796 		if (!power_well->domains)
2797 			continue;
2798 
2799 		enabled = power_well->ops->is_enabled(dev_priv, power_well);
2800 		if ((power_well->count || power_well->always_on) != enabled)
2801 			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
2802 				  power_well->name, power_well->count, enabled);
2803 
2804 		domains_count = 0;
2805 		for_each_power_domain(domain, power_well->domains)
2806 			domains_count += power_domains->domain_use_count[domain];
2807 
2808 		if (power_well->count != domains_count) {
2809 			DRM_ERROR("power well %s refcount/domain refcount mismatch "
2810 				  "(refcount %d/domains refcount %d)\n",
2811 				  power_well->name, power_well->count,
2812 				  domains_count);
2813 			dump_domain_info = true;
2814 		}
2815 	}
2816 
2817 	if (dump_domain_info) {
2818 		static bool dumped;
2819 
2820 		if (!dumped) {
2821 			intel_power_domains_dump_info(dev_priv);
2822 			dumped = true;
2823 		}
2824 	}
2825 
2826 	mutex_unlock(&power_domains->lock);
2827 }
2828 
2829 /**
2830  * intel_runtime_pm_get - grab a runtime pm reference
2831  * @dev_priv: i915 device instance
2832  *
2833  * This function grabs a device-level runtime pm reference (mostly used for GEM
2834  * code to ensure the GTT or GT is on) and ensures that it is powered up.
2835  *
2836  * Any runtime pm reference obtained by this function must have a symmetric
2837  * call to intel_runtime_pm_put() to release the reference again.
2838  */
2839 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2840 {
2841 	struct pci_dev *pdev = dev_priv->drm.pdev;
2842 	struct device *kdev = &pdev->dev;
2843 	int ret;
2844 
2845 	ret = pm_runtime_get_sync(kdev);
2846 	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
2847 
2848 	atomic_inc(&dev_priv->pm.wakeref_count);
2849 	assert_rpm_wakelock_held(dev_priv);
2850 }
2851 
2852 /**
2853  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
2854  * @dev_priv: i915 device instance
2855  *
2856  * This function grabs a device-level runtime pm reference if the device is
2857  * already in use and ensures that it is powered up.
2858  *
2859  * Any runtime pm reference obtained by this function must have a symmetric
2860  * call to intel_runtime_pm_put() to release the reference again.
2861  */
2862 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2863 {
2864 	struct pci_dev *pdev = dev_priv->drm.pdev;
2865 	struct device *kdev = &pdev->dev;
2866 
2867 	if (IS_ENABLED(CONFIG_PM)) {
2868 		int ret = pm_runtime_get_if_in_use(kdev);
2869 
2870 		/*
2871 		 * In cases runtime PM is disabled by the RPM core and we get
2872 		 * an -EINVAL return value we are not supposed to call this
2873 		 * function, since the power state is undefined. This applies
2874 		 * atm to the late/early system suspend/resume handlers.
2875 		 */
2876 		WARN_ONCE(ret < 0,
2877 			  "pm_runtime_get_if_in_use() failed: %d\n", ret);
2878 		if (ret <= 0)
2879 			return false;
2880 	}
2881 
2882 	atomic_inc(&dev_priv->pm.wakeref_count);
2883 	assert_rpm_wakelock_held(dev_priv);
2884 
2885 	return true;
2886 }
2887 
2888 /**
2889  * intel_runtime_pm_get_noresume - grab a runtime pm reference
2890  * @dev_priv: i915 device instance
2891  *
2892  * This function grabs a device-level runtime pm reference (mostly used for GEM
2893  * code to ensure the GTT or GT is on).
2894  *
2895  * It will _not_ power up the device but instead only check that it's powered
2896  * on.  Therefore it is only valid to call this functions from contexts where
2897  * the device is known to be powered up and where trying to power it up would
2898  * result in hilarity and deadlocks. That pretty much means only the system
2899  * suspend/resume code where this is used to grab runtime pm references for
2900  * delayed setup down in work items.
2901  *
2902  * Any runtime pm reference obtained by this function must have a symmetric
2903  * call to intel_runtime_pm_put() to release the reference again.
2904  */
2905 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2906 {
2907 	struct pci_dev *pdev = dev_priv->drm.pdev;
2908 	struct device *kdev = &pdev->dev;
2909 
2910 	assert_rpm_wakelock_held(dev_priv);
2911 	pm_runtime_get_noresume(kdev);
2912 
2913 	atomic_inc(&dev_priv->pm.wakeref_count);
2914 }
2915 
2916 /**
2917  * intel_runtime_pm_put - release a runtime pm reference
2918  * @dev_priv: i915 device instance
2919  *
2920  * This function drops the device-level runtime pm reference obtained by
2921  * intel_runtime_pm_get() and might power down the corresponding
2922  * hardware block right away if this is the last reference.
2923  */
2924 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2925 {
2926 	struct pci_dev *pdev = dev_priv->drm.pdev;
2927 	struct device *kdev = &pdev->dev;
2928 
2929 	assert_rpm_wakelock_held(dev_priv);
2930 	atomic_dec(&dev_priv->pm.wakeref_count);
2931 
2932 	pm_runtime_mark_last_busy(kdev);
2933 	pm_runtime_put_autosuspend(kdev);
2934 }
2935 
2936 /**
2937  * intel_runtime_pm_enable - enable runtime pm
2938  * @dev_priv: i915 device instance
2939  *
2940  * This function enables runtime pm at the end of the driver load sequence.
2941  *
2942  * Note that this function does currently not enable runtime pm for the
2943  * subordinate display power domains. That is only done on the first modeset
2944  * using intel_display_set_init_power().
2945  */
2946 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2947 {
2948 	struct pci_dev *pdev = dev_priv->drm.pdev;
2949 	struct device *kdev = &pdev->dev;
2950 
2951 	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
2952 	pm_runtime_mark_last_busy(kdev);
2953 
2954 	/*
2955 	 * Take a permanent reference to disable the RPM functionality and drop
2956 	 * it only when unloading the driver. Use the low level get/put helpers,
2957 	 * so the driver's own RPM reference tracking asserts also work on
2958 	 * platforms without RPM support.
2959 	 */
2960 	if (!HAS_RUNTIME_PM(dev_priv)) {
2961 		int ret;
2962 
2963 		pm_runtime_dont_use_autosuspend(kdev);
2964 		ret = pm_runtime_get_sync(kdev);
2965 		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
2966 	} else {
2967 		pm_runtime_use_autosuspend(kdev);
2968 	}
2969 
2970 	/*
2971 	 * The core calls the driver load handler with an RPM reference held.
2972 	 * We drop that here and will reacquire it during unloading in
2973 	 * intel_power_domains_fini().
2974 	 */
2975 	pm_runtime_put_autosuspend(kdev);
2976 }
2977