xref: /openbmc/linux/drivers/gpu/drm/i915/intel_runtime_pm.c (revision 530e7a660fb795452357b36cce26b839a9a187a9)
1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28 
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31 
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34 
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * But even there some manual control at the device level is required.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
51 
52 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
53 					 enum i915_power_well_id power_well_id);
54 
55 static struct i915_power_well *
56 lookup_power_well(struct drm_i915_private *dev_priv,
57 		  enum i915_power_well_id power_well_id);
58 
/*
 * intel_display_power_domain_str - human readable name for a power domain
 * @domain: power domain to name
 *
 * Returns a static string naming @domain for debug/diagnostic output, or
 * "?" (after a MISSING_CASE warning) for a value not in this table. Keep
 * this switch in sync with enum intel_display_power_domain.
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
150 
/* Power up a well via its platform hook and update our SW tracking. */
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	/* hw_enabled is set only after ->enable() has completed. */
	power_well->hw_enabled = true;
}
158 
/* Update our SW tracking, then power down a well via its platform hook. */
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	/* hw_enabled is cleared before ->disable() runs, mirroring enable. */
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}
166 
167 static void intel_power_well_get(struct drm_i915_private *dev_priv,
168 				 struct i915_power_well *power_well)
169 {
170 	if (!power_well->count++)
171 		intel_power_well_enable(dev_priv, power_well);
172 }
173 
174 static void intel_power_well_put(struct drm_i915_private *dev_priv,
175 				 struct i915_power_well *power_well)
176 {
177 	WARN(!power_well->count, "Use count on power well %s is already zero",
178 	     power_well->name);
179 
180 	if (!--power_well->count)
181 		intel_power_well_disable(dev_priv, power_well);
182 }
183 
184 /**
185  * __intel_display_power_is_enabled - unlocked check for a power domain
186  * @dev_priv: i915 device instance
187  * @domain: power domain to check
188  *
189  * This is the unlocked version of intel_display_power_is_enabled() and should
190  * only be used from error capture and recovery code where deadlocks are
191  * possible.
192  *
193  * Returns:
194  * True when the power domain is enabled, false otherwise.
195  */
196 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
197 				      enum intel_display_power_domain domain)
198 {
199 	struct i915_power_well *power_well;
200 	bool is_enabled;
201 
202 	if (dev_priv->runtime_pm.suspended)
203 		return false;
204 
205 	is_enabled = true;
206 
207 	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
208 		if (power_well->always_on)
209 			continue;
210 
211 		if (!power_well->hw_enabled) {
212 			is_enabled = false;
213 			break;
214 		}
215 	}
216 
217 	return is_enabled;
218 }
219 
220 /**
221  * intel_display_power_is_enabled - check for a power domain
222  * @dev_priv: i915 device instance
223  * @domain: power domain to check
224  *
225  * This function can be used to check the hw power domain state. It is mostly
226  * used in hardware state readout functions. Everywhere else code should rely
227  * upon explicit power domain reference counting to ensure that the hardware
228  * block is powered up before accessing it.
229  *
230  * Callers must hold the relevant modesetting locks to ensure that concurrent
231  * threads can't disable the power well while the caller tries to read a few
232  * registers.
233  *
234  * Returns:
235  * True when the power domain is enabled, false otherwise.
236  */
237 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
238 				    enum intel_display_power_domain domain)
239 {
240 	struct i915_power_domains *power_domains;
241 	bool ret;
242 
243 	power_domains = &dev_priv->power_domains;
244 
245 	mutex_lock(&power_domains->lock);
246 	ret = __intel_display_power_is_enabled(dev_priv, domain);
247 	mutex_unlock(&power_domains->lock);
248 
249 	return ret;
250 }
251 
252 /**
253  * intel_display_set_init_power - set the initial power domain state
254  * @dev_priv: i915 device instance
255  * @enable: whether to enable or disable the initial power domain state
256  *
257  * For simplicity our driver load/unload and system suspend/resume code assumes
258  * that all power domains are always enabled. This functions controls the state
259  * of this little hack. While the initial power domain state is enabled runtime
260  * pm is effectively disabled.
261  */
262 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
263 				  bool enable)
264 {
265 	if (dev_priv->power_domains.init_power_on == enable)
266 		return;
267 
268 	if (enable)
269 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
270 	else
271 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
272 
273 	dev_priv->power_domains.init_power_on = enable;
274 }
275 
276 /*
277  * Starting with Haswell, we have a "Power Down Well" that can be turned off
278  * when not needed anymore. We have 4 registers that can request the power well
279  * to be enabled, and it will only be disabled if none of the registers is
280  * requesting it to be enabled.
281  */
/*
 * Common fixups run after a HSW+ power well has been enabled: poke the
 * VGA MSR register if the well feeds VGA, and re-enable pipe interrupts
 * owned by the well.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure we touch the VGA MSR register here,
	 * making sure vgacon can keep working normally without triggering
	 * interrupts and error messages.
	 */
	if (has_vga) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
306 
/*
 * Disable the pipe interrupts living in a power well before the well
 * itself is powered down.
 */
static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
313 
314 
/*
 * Poll the driver control register until the well's state bit reads
 * back as enabled, warning on timeout.
 */
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	WARN_ON(intel_wait_for_register(dev_priv,
					HSW_PWR_WELL_CTL_DRIVER(id),
					HSW_PWR_WELL_CTL_STATE(id),
					HSW_PWR_WELL_CTL_STATE(id),
					1));
}
327 
328 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
329 				     enum i915_power_well_id id)
330 {
331 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(id);
332 	u32 ret;
333 
334 	ret = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)) & req_mask ? 1 : 0;
335 	ret |= I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & req_mask ? 2 : 0;
336 	ret |= I915_READ(HSW_PWR_WELL_CTL_KVMR) & req_mask ? 4 : 0;
337 	ret |= I915_READ(HSW_PWR_WELL_CTL_DEBUG(id)) & req_mask ? 8 : 0;
338 
339 	return ret;
340 }
341 
/*
 * Best-effort wait for a power well to report disabled; if some other
 * agent is keeping it on, bail out and log who is requesting it.
 */
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 *
	 * Note: the wait_for() condition below deliberately latches both the
	 * disabled state and the current requesters via inline assignments.
	 */
	wait_for((disabled = !(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
			       HSW_PWR_WELL_CTL_STATE(id))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, id)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
368 
/* Wait for power gate @pg's fuse distribution status, warning on timeout. */
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
					SKL_FUSE_PG_DIST_STATUS(pg),
					SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
377 
/*
 * HSW+ power well enable sequence: set the driver request bit, wait for
 * the well's state bit, wait for the corresponding fuse state where
 * applicable, then run the common post-enable fixups.
 */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	bool wait_fuses = power_well->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		pg = SKL_PW_TO_PG(id);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	/* Request the well by setting its REQ bit in the driver register. */
	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    (id == CNL_DISP_PW_AUX_B || id == CNL_DISP_PW_AUX_C ||
	     id == CNL_DISP_PW_AUX_D || id == CNL_DISP_PW_AUX_F)) {
		val = I915_READ(CNL_AUX_ANAOVRD1(id));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(id), val);
	}

	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv, power_well->hsw.irq_pipe_mask,
				   power_well->hsw.has_vga);
}
418 
/*
 * HSW+ power well disable sequence: mask the well's pipe interrupts,
 * clear the driver request bit, then wait for the well to power down.
 */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	u32 val;

	hsw_power_well_pre_disable(dev_priv, power_well->hsw.irq_pipe_mask);

	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
		   val & ~HSW_PWR_WELL_CTL_REQ(id));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
432 
433 /*
434  * We should only use the power well if we explicitly asked the hardware to
435  * enable it, so check if it's enabled and also check if we've requested it to
436  * be enabled.
437  */
438 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
439 				   struct i915_power_well *power_well)
440 {
441 	enum i915_power_well_id id = power_well->id;
442 	u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id);
443 
444 	return (I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask;
445 }
446 
/*
 * Sanity-check the preconditions for entering DC9: DC9 not already
 * enabled, DC5 disabled, power well 2 off and interrupts disabled.
 * Violations only warn; the caller proceeds regardless.
 */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id id = SKL_DISP_PW_2;

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
		  HSW_PWR_WELL_CTL_REQ(id),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
469 
/*
 * Sanity-check the preconditions for exiting DC9: interrupts still
 * disabled and DC5 not enabled. Violations only warn.
 */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
485 
/*
 * Write @state to DC_STATE_EN, re-reading and re-writing until the
 * value sticks (the DMC firmware has been observed to revert it).
 * Gives up after 100 rewrites and logs an error.
 */
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do  {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			/* Value held stable across several re-reads: done. */
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}
522 
523 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
524 {
525 	u32 mask;
526 
527 	mask = DC_STATE_EN_UPTO_DC5;
528 	if (IS_GEN9_LP(dev_priv))
529 		mask |= DC_STATE_EN_DC9;
530 	else
531 		mask |= DC_STATE_EN_UPTO_DC6;
532 
533 	return mask;
534 }
535 
/*
 * Resynchronize the driver's cached DC state with what the HW actually
 * reports, e.g. after the DMC changed it behind our back.
 */
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}
546 
547 /**
548  * gen9_set_dc_state - set target display C power state
549  * @dev_priv: i915 device instance
550  * @state: target DC power state
551  * - DC_STATE_DISABLE
552  * - DC_STATE_EN_UPTO_DC5
553  * - DC_STATE_EN_UPTO_DC6
554  * - DC_STATE_EN_DC9
555  *
556  * Signal to DMC firmware/HW the target DC power state passed in @state.
557  * DMC/HW can turn off individual display clocks and power rails when entering
558  * a deeper DC power state (higher in number) and turns these back when exiting
559  * that state to a shallower power state (lower in number). The HW will decide
560  * when to actually enter a given state on an on-demand basis, for instance
561  * depending on the active state of display pipes. The state of display
562  * registers backed by affected power rails are saved/restored as needed.
563  *
564  * Based on the above enabling a deeper DC power state is asynchronous wrt.
565  * enabling it. Disabling a deeper power state is synchronous: for instance
566  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
567  * back on and register state is restored. This is guaranteed by the MMIO write
568  * to DC_STATE_EN blocking until the state is restored.
569  */
570 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
571 {
572 	uint32_t val;
573 	uint32_t mask;
574 
575 	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
576 		state &= dev_priv->csr.allowed_dc_mask;
577 
578 	val = I915_READ(DC_STATE_EN);
579 	mask = gen9_dc_mask(dev_priv);
580 	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
581 		      val & mask, state);
582 
583 	/* Check if DMC is ignoring our DC state requests */
584 	if ((val & mask) != dev_priv->csr.dc_state)
585 		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
586 			  dev_priv->csr.dc_state, val & mask);
587 
588 	val &= ~mask;
589 	val |= state;
590 
591 	gen9_write_dc_state(dev_priv, val);
592 
593 	dev_priv->csr.dc_state = val & mask;
594 }
595 
/* Enter DC9: reset the panel power sequencer state, then request DC9. */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
605 
/* Exit DC9 and reapply the PPS register unlock workaround. */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
616 
/* Warn if the CSR/DMC firmware program registers look unprogrammed. */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
624 
/*
 * Sanity-check the preconditions for enabling DC5: power well 2 off,
 * DC5 not already enabled, an RPM wakelock held and the CSR firmware
 * loaded. Violations only warn.
 */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}
638 
/* Allow the HW to enter DC5 (after applying Wa Display #1183 on gen9 BC). */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
652 
/*
 * Sanity-check the preconditions for enabling DC6: backlight utility
 * pin disabled, DC6 not already enabled, CSR firmware loaded.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
662 
/* Allow the HW to enter DC6 (after applying Wa Display #1183 on gen9 BC). */
static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
676 
/*
 * Synchronize a HSW+ power well with its BIOS state: if the BIOS left
 * its request bit set, transfer that request to the driver register and
 * clear the BIOS one, so the driver alone keeps the well enabled.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	u32 mask = HSW_PWR_WELL_CTL_REQ(id);
	u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id));

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));

		/* Set our request first so the well never drops out. */
		if (!(drv_req & mask))
			I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), drv_req | mask);
		I915_WRITE(HSW_PWR_WELL_CTL_BIOS(id), bios_req & ~mask);
	}
}
693 
/* Enable hook for BXT DPIO common wells: bring up the backing DDI PHY. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->bxt.phy);
}
699 
/* Disable hook for BXT DPIO common wells: tear down the backing DDI PHY. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy);
}
705 
/* is_enabled hook for BXT DPIO common wells: query the backing DDI PHY. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy);
}
711 
712 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
713 {
714 	struct i915_power_well *power_well;
715 
716 	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
717 	if (power_well->count > 0)
718 		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
719 
720 	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
721 	if (power_well->count > 0)
722 		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
723 
724 	if (IS_GEMINILAKE(dev_priv)) {
725 		power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
726 		if (power_well->count > 0)
727 			bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
728 	}
729 }
730 
/* The "DC off" well is enabled when neither DC5 nor DC6 is allowed. */
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}
736 
737 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
738 {
739 	u32 tmp = I915_READ(DBUF_CTL);
740 
741 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
742 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
743 	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
744 }
745 
/*
 * Enable hook for the "DC off" well: disable all DC states, then verify
 * the state the DMC should have restored (cdclk, DBuf, BXT PHYs).
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}
762 
/*
 * Disable hook for the "DC off" well: re-allow the deepest permitted DC
 * state (DC6 preferred over DC5). No-op without loaded DMC firmware.
 */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}
774 
/* No-op sync_hw hook for wells with no HW state to synchronize. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
779 
/* No-op enable/disable hook for always-on wells. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
784 
/* Always-on wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
790 
/* i830 "pipes" well: force both pipes on, skipping already-enabled ones. */
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}
799 
/* i830 "pipes" well: turn both pipes off (B first, reverse of enable). */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
806 
807 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
808 					  struct i915_power_well *power_well)
809 {
810 	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
811 		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
812 }
813 
/* Bring the i830 pipes well HW in line with our SW reference count. */
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}
822 
/*
 * Set a VLV/CHV power well on or off through the Punit, waiting (up to
 * 100 ms) for the status register to reflect the new state. Skips the
 * Punit write when the well is already in the requested state.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum i915_power_well_id power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->pcu_lock);

/* True once the Punit status register reports the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
858 
/* Enable hook for VLV Punit-controlled wells. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
864 
/* Disable hook for VLV Punit-controlled wells. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
870 
/*
 * Read back a VLV well's state from the Punit, warning if the status or
 * control registers hold anything other than the fully-on or fully-gated
 * values we ourselves program.
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
906 
/*
 * Program the VLV display clock gating, arbiter and raw clock frequency
 * registers to their expected initial values.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 *
	 * Note the &= keeps *only* the DPOUNIT bit from the current value;
	 * every other bit is reset.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	/* rawclk_freq must have been read out before this point. */
	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
933 
/*
 * Common init performed when the VLV/CHV display power well comes up:
 * CRI clock/DPLL setup, clock gating, display IRQs, and - outside of
 * driver init - HPD, CRT and VGA/PPS state restoration.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
982 
/*
 * Tear down the software state that depends on the VLV/CHV display
 * power well before the well itself gets turned off.
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	/* NOTE(review): presumably the PPS HW state is lost with the well - confirm */
	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
998 
/* Power on the DISP2D well, then (re)initialize the display state on top of it. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	/* This callback is only wired up for the DISP2D well */
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1008 
/* Tear down the display state first, then power off the DISP2D well. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* This callback is only wired up for the DISP2D well */
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1018 
/*
 * Power up the VLV DPIO common lane well and de-assert the common lane
 * reset afterwards. Must only run with the ref/CRI clock already enabled
 * (see vlv_display_power_well_init()).
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1042 
/*
 * Power off the VLV DPIO common lane well. All PLLs must already be
 * disabled (asserted below) since the well feeds the PHY behind them.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1058 
/* Mask covering every valid power domain bit */
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1060 
1061 static struct i915_power_well *
1062 lookup_power_well(struct drm_i915_private *dev_priv,
1063 		  enum i915_power_well_id power_well_id)
1064 {
1065 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1066 	int i;
1067 
1068 	for (i = 0; i < power_domains->power_well_count; i++) {
1069 		struct i915_power_well *power_well;
1070 
1071 		power_well = &power_domains->power_wells[i];
1072 		if (power_well->id == power_well_id)
1073 			return power_well;
1074 	}
1075 
1076 	return NULL;
1077 }
1078 
/* Evaluates to true iff all @bits are set in @val; scoped to the assert below */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

/*
 * Sanity check: derive the expected DISPLAY_PHY_STATUS contents from the
 * cached chv_phy_control value and the current state of the two common
 * lane wells, wait for the hardware to report that state, and complain
 * if it never does.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* lane mask 0x3 covers lanes 0/1 (spline 0), 0xc covers lanes 2/3 (spline 1) */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			   phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET
1188 
/*
 * Power up a CHV DPIO common lane well (PHY0 for the BC well, PHY1 for
 * the D well), wait for the PHY power-good indication, program dynamic
 * power down, and finally de-assert the common lane reset.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	/* NOTE(review): pipe here selects the DPIO sideband access path - confirm */
	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/* De-assert the common lane reset and latch the new control value */
	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1252 
/*
 * Power down a CHV DPIO common lane well. The PLLs fed by the PHY must
 * already be off, which is asserted below.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	/* Assert the common lane reset before cutting power */
	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1283 
/*
 * Verify that the actual DPIO lane power down status for @phy/@ch
 * matches what we expect given the override setting and lane @mask.
 * Only effective once the PHY has been fully reset at least once
 * (tracked via chv_phy_assert[]).
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* The status bits live at a per-channel shift in the register */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1345 
1346 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1347 			  enum dpio_channel ch, bool override)
1348 {
1349 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1350 	bool was_override;
1351 
1352 	mutex_lock(&power_domains->lock);
1353 
1354 	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1355 
1356 	if (override == was_override)
1357 		goto out;
1358 
1359 	if (override)
1360 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1361 	else
1362 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1363 
1364 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1365 
1366 	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1367 		      phy, ch, dev_priv->chv_phy_control);
1368 
1369 	assert_chv_phy_status(dev_priv);
1370 
1371 out:
1372 	mutex_unlock(&power_domains->lock);
1373 
1374 	return was_override;
1375 }
1376 
/*
 * Update the per-lane power down override mask for the encoder's
 * PHY/channel and enable/disable the override itself, then verify the
 * resulting PHY state.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* Replace the old lane mask with the new one */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1406 
/*
 * Read back the punit DSPFREQ register to determine whether the CHV
 * pipe A power well is currently on.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	/* Only pipe A's well is handled this way in this file */
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
1435 
/*
 * Request the punit to power the CHV pipe A well on or off and wait
 * (up to 100ms) for the status to reflect the request.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->pcu_lock);

/* COND: the punit status field matches the requested power state */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
1469 
/* Power on the CHV pipe A well, then (re)initialize the display state. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	/* This callback is only wired up for the pipe A well */
	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1479 
/* Tear down the display state first, then power off the CHV pipe A well. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	/* This callback is only wired up for the pipe A well */
	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1489 
1490 static void
1491 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1492 				 enum intel_display_power_domain domain)
1493 {
1494 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1495 	struct i915_power_well *power_well;
1496 
1497 	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1498 		intel_power_well_get(dev_priv, power_well);
1499 
1500 	power_domains->domain_use_count[domain]++;
1501 }
1502 
1503 /**
1504  * intel_display_power_get - grab a power domain reference
1505  * @dev_priv: i915 device instance
1506  * @domain: power domain to reference
1507  *
1508  * This function grabs a power domain reference for @domain and ensures that the
1509  * power domain and all its parents are powered up. Therefore users should only
1510  * grab a reference to the innermost power domain they need.
1511  *
1512  * Any power domain reference obtained by this function must have a symmetric
1513  * call to intel_display_power_put() to release the reference again.
1514  */
1515 void intel_display_power_get(struct drm_i915_private *dev_priv,
1516 			     enum intel_display_power_domain domain)
1517 {
1518 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1519 
1520 	intel_runtime_pm_get(dev_priv);
1521 
1522 	mutex_lock(&power_domains->lock);
1523 
1524 	__intel_display_power_get_domain(dev_priv, domain);
1525 
1526 	mutex_unlock(&power_domains->lock);
1527 }
1528 
1529 /**
1530  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1531  * @dev_priv: i915 device instance
1532  * @domain: power domain to reference
1533  *
1534  * This function grabs a power domain reference for @domain and ensures that the
1535  * power domain and all its parents are powered up. Therefore users should only
1536  * grab a reference to the innermost power domain they need.
1537  *
1538  * Any power domain reference obtained by this function must have a symmetric
1539  * call to intel_display_power_put() to release the reference again.
1540  */
1541 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1542 					enum intel_display_power_domain domain)
1543 {
1544 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1545 	bool is_enabled;
1546 
1547 	if (!intel_runtime_pm_get_if_in_use(dev_priv))
1548 		return false;
1549 
1550 	mutex_lock(&power_domains->lock);
1551 
1552 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1553 		__intel_display_power_get_domain(dev_priv, domain);
1554 		is_enabled = true;
1555 	} else {
1556 		is_enabled = false;
1557 	}
1558 
1559 	mutex_unlock(&power_domains->lock);
1560 
1561 	if (!is_enabled)
1562 		intel_runtime_pm_put(dev_priv);
1563 
1564 	return is_enabled;
1565 }
1566 
1567 /**
1568  * intel_display_power_put - release a power domain reference
1569  * @dev_priv: i915 device instance
1570  * @domain: power domain to reference
1571  *
1572  * This function drops the power domain reference obtained by
1573  * intel_display_power_get() and might power down the corresponding hardware
1574  * block right away if this is the last reference.
1575  */
1576 void intel_display_power_put(struct drm_i915_private *dev_priv,
1577 			     enum intel_display_power_domain domain)
1578 {
1579 	struct i915_power_domains *power_domains;
1580 	struct i915_power_well *power_well;
1581 
1582 	power_domains = &dev_priv->power_domains;
1583 
1584 	mutex_lock(&power_domains->lock);
1585 
1586 	WARN(!power_domains->domain_use_count[domain],
1587 	     "Use count on domain %s is already zero\n",
1588 	     intel_display_power_domain_str(domain));
1589 	power_domains->domain_use_count[domain]--;
1590 
1591 	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
1592 		intel_power_well_put(dev_priv, power_well);
1593 
1594 	mutex_unlock(&power_domains->lock);
1595 
1596 	intel_runtime_pm_put(dev_priv);
1597 }
1598 
/*
 * Per-platform power domain masks: the set of display power domains
 * each power well provides. POWER_DOMAIN_INIT appears in (nearly)
 * every set.
 */
#define I830_PIPES_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* VLV: display well plus DPIO common/TX lane wells */
#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* CHV: adds pipe C / port D on top of the VLV layout */
#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* HSW/BDW: a single big display power well */
#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* SKL: power well 2 plus per-DDI IO wells and the DC-off well */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* BXT: power well 2, DC-off and the two DPIO common lane wells */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* GLK: per-DDI IO, per-PHY DPIO and per-port AUX wells */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1841 
/* Domains gated by CNL power well 2; CNL adds ports D and F. */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Per-port DDI IO wells. */
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Per-port AUX channel wells; AUX A additionally covers the AUX IO domain. */
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Domains served by the CNL "DC off" well. */
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1898 
/* Ops for always-on wells: enable/disable are no-ops, nothing to sync. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* Ops for the CHV pipe power well. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* Ops for the CHV DPIO common lane wells; status is read the VLV way. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
1919 
/*
 * Fallback table for platforms without dedicated power wells: a single
 * always-on well covering every power domain.
 */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
};
1929 
/* Ops for the i830 "pipes" well; the only well here with a real sync_hw hook. */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};
1936 
/* i830: an always-on well plus a well gating both pipes. */
static struct i915_power_well i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = I830_DISP_PW_PIPES,
	},
};
1952 
/* Generic HSW+ power well ops, reused by all SKL/BXT/GLK/CNL wells below. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* Ops for the virtual "DC off" well controlling display DC states. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

/* Ops for the BXT/GLK DPIO common lane (PHY) wells. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
1973 
/* HSW: one global display well on top of the always-on one. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.has_vga = true,
		},
	},
};
1992 
/* BDW: like HSW, but pipe B/C interrupts are gated by the display well. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};
2012 
/* Ops for the VLV display (DISP2D) well. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Ops for the VLV DPIO common lane well. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic ops for the VLV DPIO TX lane wells. */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2033 
/*
 * VLV power wells, in power-on (array) order. Note that all four DPIO TX
 * lane wells claim the same set of lane domains, so any lane use powers
 * up all of them.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
2091 
/* CHV power wells, in power-on (array) order. */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.id = CHV_DISP_PW_PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};
2124 
2125 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2126 					 enum i915_power_well_id power_well_id)
2127 {
2128 	struct i915_power_well *power_well;
2129 	bool ret;
2130 
2131 	power_well = lookup_power_well(dev_priv, power_well_id);
2132 	ret = power_well->ops->is_enabled(dev_priv, power_well);
2133 
2134 	return ret;
2135 }
2136 
/*
 * SKL power wells, in power-on (array) order: the enabling order runs from
 * lower to higher index, the disabling order is reversed (see
 * intel_power_domains_init()).
 */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
2204 
/* BXT power wells, in power-on (array) order. */
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* No domains: handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};
2258 
/* GLK power wells, in power-on (array) order. */
static struct i915_power_well glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_C,
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
};
2358 
/* CNL power wells, in power-on (array) order. */
static struct i915_power_well cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_C,
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_D,
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
	/*
	 * The DDI F and AUX F wells must stay the last two entries:
	 * intel_power_domains_init() drops them from the table by
	 * decrementing power_well_count on SKUs without port F.
	 */
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_DDI_F,
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_F,
	},
};
2455 
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	/*
	 * A negative module parameter means "use the default", which is
	 * to allow disabling power wells; any non-negative value is
	 * normalized to a strict 0/1 boolean.
	 */
	return disable_power_well < 0 ? 1 : !!disable_power_well;
}
2465 
2466 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2467 				    int enable_dc)
2468 {
2469 	uint32_t mask;
2470 	int requested_dc;
2471 	int max_dc;
2472 
2473 	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
2474 		max_dc = 2;
2475 		mask = 0;
2476 	} else if (IS_GEN9_LP(dev_priv)) {
2477 		max_dc = 1;
2478 		/*
2479 		 * DC9 has a separate HW flow from the rest of the DC states,
2480 		 * not depending on the DMC firmware. It's needed by system
2481 		 * suspend/resume, so allow it unconditionally.
2482 		 */
2483 		mask = DC_STATE_EN_DC9;
2484 	} else {
2485 		max_dc = 0;
2486 		mask = 0;
2487 	}
2488 
2489 	if (!i915_modparams.disable_power_well)
2490 		max_dc = 0;
2491 
2492 	if (enable_dc >= 0 && enable_dc <= max_dc) {
2493 		requested_dc = enable_dc;
2494 	} else if (enable_dc == -1) {
2495 		requested_dc = max_dc;
2496 	} else if (enable_dc > max_dc && enable_dc <= 2) {
2497 		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2498 			      enable_dc, max_dc);
2499 		requested_dc = max_dc;
2500 	} else {
2501 		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2502 		requested_dc = max_dc;
2503 	}
2504 
2505 	if (requested_dc > 1)
2506 		mask |= DC_STATE_EN_UPTO_DC6;
2507 	if (requested_dc > 0)
2508 		mask |= DC_STATE_EN_UPTO_DC5;
2509 
2510 	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2511 
2512 	return mask;
2513 }
2514 
2515 static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
2516 {
2517 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2518 	u64 power_well_ids;
2519 	int i;
2520 
2521 	power_well_ids = 0;
2522 	for (i = 0; i < power_domains->power_well_count; i++) {
2523 		enum i915_power_well_id id = power_domains->power_wells[i].id;
2524 
2525 		WARN_ON(id >= sizeof(power_well_ids) * 8);
2526 		WARN_ON(power_well_ids & BIT_ULL(id));
2527 		power_well_ids |= BIT_ULL(id);
2528 	}
2529 }
2530 
/* Install a platform's power well table along with its element count. */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
2535 
2536 /**
2537  * intel_power_domains_init - initializes the power domain structures
2538  * @dev_priv: i915 device instance
2539  *
2540  * Initializes the power domain structures for @dev_priv depending upon the
2541  * supported platform.
2542  */
2543 int intel_power_domains_init(struct drm_i915_private *dev_priv)
2544 {
2545 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2546 
2547 	i915_modparams.disable_power_well =
2548 		sanitize_disable_power_well_option(dev_priv,
2549 						   i915_modparams.disable_power_well);
2550 	dev_priv->csr.allowed_dc_mask =
2551 		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
2552 
2553 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
2554 
2555 	mutex_init(&power_domains->lock);
2556 
2557 	/*
2558 	 * The enabling order will be from lower to higher indexed wells,
2559 	 * the disabling order is reversed.
2560 	 */
2561 	if (IS_HASWELL(dev_priv)) {
2562 		set_power_wells(power_domains, hsw_power_wells);
2563 	} else if (IS_BROADWELL(dev_priv)) {
2564 		set_power_wells(power_domains, bdw_power_wells);
2565 	} else if (IS_GEN9_BC(dev_priv)) {
2566 		set_power_wells(power_domains, skl_power_wells);
2567 	} else if (IS_CANNONLAKE(dev_priv)) {
2568 		set_power_wells(power_domains, cnl_power_wells);
2569 
2570 		/*
2571 		 * DDI and Aux IO are getting enabled for all ports
2572 		 * regardless the presence or use. So, in order to avoid
2573 		 * timeouts, lets remove them from the list
2574 		 * for the SKUs without port F.
2575 		 */
2576 		if (!IS_CNL_WITH_PORT_F(dev_priv))
2577 			power_domains->power_well_count -= 2;
2578 
2579 	} else if (IS_BROXTON(dev_priv)) {
2580 		set_power_wells(power_domains, bxt_power_wells);
2581 	} else if (IS_GEMINILAKE(dev_priv)) {
2582 		set_power_wells(power_domains, glk_power_wells);
2583 	} else if (IS_CHERRYVIEW(dev_priv)) {
2584 		set_power_wells(power_domains, chv_power_wells);
2585 	} else if (IS_VALLEYVIEW(dev_priv)) {
2586 		set_power_wells(power_domains, vlv_power_wells);
2587 	} else if (IS_I830(dev_priv)) {
2588 		set_power_wells(power_domains, i830_power_wells);
2589 	} else {
2590 		set_power_wells(power_domains, i9xx_always_on_power_well);
2591 	}
2592 
2593 	assert_power_well_ids_unique(dev_priv);
2594 
2595 	return 0;
2596 }
2597 
2598 /**
2599  * intel_power_domains_fini - finalizes the power domain structures
2600  * @dev_priv: i915 device instance
2601  *
2602  * Finalizes the power domain structures for @dev_priv depending upon the
2603  * supported platform. This function also disables runtime pm and ensures that
2604  * the device stays powered up so that the driver can be reloaded.
2605  */
2606 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2607 {
2608 	struct device *kdev = &dev_priv->drm.pdev->dev;
2609 
2610 	/*
2611 	 * The i915.ko module is still not prepared to be loaded when
2612 	 * the power well is not enabled, so just enable it in case
2613 	 * we're going to unload/reload.
2614 	 * The following also reacquires the RPM reference the core passed
2615 	 * to the driver during loading, which is dropped in
2616 	 * intel_runtime_pm_enable(). We have to hand back the control of the
2617 	 * device to the core with this reference held.
2618 	 */
2619 	intel_display_set_init_power(dev_priv, true);
2620 
2621 	/* Remove the refcount we took to keep power well support disabled. */
2622 	if (!i915_modparams.disable_power_well)
2623 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2624 
2625 	/*
2626 	 * Remove the refcount we took in intel_runtime_pm_enable() in case
2627 	 * the platform doesn't support runtime PM.
2628 	 */
2629 	if (!HAS_RUNTIME_PM(dev_priv))
2630 		pm_runtime_put(kdev);
2631 }
2632 
/*
 * Reconcile the driver's power well bookkeeping with the current hardware
 * state: run each well's ->sync_hw() hook and cache the result of
 * ->is_enabled() in ->hw_enabled, under the power domains lock.
 */
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
2646 
2647 static inline
2648 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
2649 			  i915_reg_t reg, bool enable)
2650 {
2651 	u32 val, status;
2652 
2653 	val = I915_READ(reg);
2654 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
2655 	I915_WRITE(reg, val);
2656 	POSTING_READ(reg);
2657 	udelay(10);
2658 
2659 	status = I915_READ(reg) & DBUF_POWER_STATE;
2660 	if ((enable && !status) || (!enable && status)) {
2661 		DRM_ERROR("DBus power %s timeout!\n",
2662 			  enable ? "enable" : "disable");
2663 		return false;
2664 	}
2665 	return true;
2666 }
2667 
/* Power up the single gen9 DBuf slice. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
}

/* Power down the single gen9 DBuf slice. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
}
2677 
2678 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
2679 {
2680 	if (INTEL_GEN(dev_priv) < 11)
2681 		return 1;
2682 	return 2;
2683 }
2684 
2685 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
2686 			    u8 req_slices)
2687 {
2688 	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
2689 	u32 val;
2690 	bool ret;
2691 
2692 	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
2693 		DRM_ERROR("Invalid number of dbuf slices requested\n");
2694 		return;
2695 	}
2696 
2697 	if (req_slices == hw_enabled_slices || req_slices == 0)
2698 		return;
2699 
2700 	val = I915_READ(DBUF_CTL_S2);
2701 	if (req_slices > hw_enabled_slices)
2702 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
2703 	else
2704 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
2705 
2706 	if (ret)
2707 		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
2708 }
2709 
/*
 * Power up both gen11 DBuf slices: request power on S1 and S2, wait the
 * 10 us settle time once, then verify both report the powered state and
 * update the cached slice count on success.
 */
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	udelay(10);

	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
	else
		dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
}
2724 
/*
 * Power down both gen11 DBuf slices; mirror image of icl_dbuf_enable().
 * On success the cached slice count is reset to 0.
 */
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	udelay(10);

	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power disable timeout!\n");
	else
		dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
}
2739 
2740 static void icl_mbus_init(struct drm_i915_private *dev_priv)
2741 {
2742 	uint32_t val;
2743 
2744 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
2745 	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
2746 	      MBUS_ABOX_B_CREDIT(1) |
2747 	      MBUS_ABOX_BW_CREDIT(1);
2748 
2749 	I915_WRITE(MBUS_ABOX_CTL, val);
2750 }
2751 
/*
 * SKL display core init sequence: disable DC states, enable the PCH reset
 * handshake, power up PG1 and MISC IO, bring up CDCLK and DBuf, and
 * finally reload the DMC firmware on resume. The order of these steps
 * matters and must not be changed.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
2783 
/*
 * SKL display core teardown: mirror image of skl_display_core_init(),
 * in reverse order. Step order must not be changed.
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
2813 
/**
 * bxt_display_core_init - power up the BXT/GLK display core
 * @dev_priv: i915 device instance
 * @resume: called from a resume code path or not
 *
 * Disables DC states, clears the PCH reset handshake bit (these platforms
 * have no PCH to respond to it), enables power well 1, CDCLK and DBUF,
 * and re-programs the DMC firmware payload on resume if one is loaded.
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware only on resume and only if loaded. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
2848 
/**
 * bxt_display_core_uninit - power down the BXT/GLK display core
 * @dev_priv: i915 device instance
 *
 * Inverse of bxt_display_core_init(): disables DC states, DBUF and
 * CDCLK, then drops the driver's request on power well 1.
 */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
2876 
/* Voltage/process corner indices into cnl_procmon_values[] below. */
enum {
	PROCMON_0_85V_DOT_0,
	PROCMON_0_95V_DOT_0,
	PROCMON_0_95V_DOT_1,
	PROCMON_1_05V_DOT_0,
	PROCMON_1_05V_DOT_1,
};

/*
 * Per-corner COMP_DW1/DW9/DW10 register values, selected in
 * cnl_set_procmon_ref_values() based on the voltage and process info
 * read back from COMP_DW3.
 */
static const struct cnl_procmon {
	u32 dw1, dw9, dw10;
} cnl_procmon_values[] = {
	[PROCMON_0_85V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
	[PROCMON_0_95V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
	[PROCMON_0_95V_DOT_1] =
		{ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
	[PROCMON_1_05V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
	[PROCMON_1_05V_DOT_1] =
		{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
2899 
2900 /*
2901  * CNL has just one set of registers, while ICL has two sets: one for port A and
2902  * the other for port B. The CNL registers are equivalent to the ICL port A
2903  * registers, that's why we call the ICL macros even though the function has CNL
2904  * on its name.
2905  */
static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
				       enum port port)
{
	const struct cnl_procmon *procmon;
	u32 val;

	/*
	 * Pick the procmon table entry matching the voltage/process corner
	 * reported by the COMP_DW3 register for this port.
	 */
	val = I915_READ(ICL_PORT_COMP_DW3(port));
	switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
	default:
		MISSING_CASE(val);
		/* fall through - use the 0.85V dot0 values as a fallback */
	case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
		procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
		break;
	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
		break;
	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
		break;
	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
		break;
	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
		break;
	}

	/* Replace only bits 23:16 and 7:0 of COMP_DW1 with the table value. */
	val = I915_READ(ICL_PORT_COMP_DW1(port));
	val &= ~((0xff << 16) | 0xff);
	val |= procmon->dw1;
	I915_WRITE(ICL_PORT_COMP_DW1(port), val);

	I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
	I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
}
2941 
/*
 * Power up the CNL display core following the numbered BSpec sequence
 * in the comments below, then re-program the DMC firmware payload when
 * resuming.
 */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val |= RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* 2. Enable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val &= ~CNL_COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);

	/* Dummy PORT_A to get the correct CNL register from the ICL macro */
	cnl_set_procmon_ref_values(dev_priv, PORT_A);

	val = I915_READ(CNL_PORT_COMP_DW0);
	val |= COMP_INIT;
	I915_WRITE(CNL_PORT_COMP_DW0, val);

	/* 3. */
	val = I915_READ(CNL_PORT_CL1CM_DW5);
	val |= CL_POWER_DOWN_ENABLE;
	I915_WRITE(CNL_PORT_CL1CM_DW5, val);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	cnl_init_cdclk(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware only on resume and only if loaded. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
2990 
/* Power down the CNL display core; inverse of cnl_display_core_init(). */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	cnl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */

	/* 5. Disable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val |= CNL_COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);
}
3024 
/*
 * Power up the ICL display core: PCH reset handshake, combo PHY comp and
 * power-down-enable programming for ports A and B, CDCLK, DBUF, MBUS and
 * the CHICKEN_DCPR_1 clock register access bit. Power well setup is not
 * implemented yet (see the FIXME at step 4).
 */
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	enum port port;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val |= RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	for (port = PORT_A; port <= PORT_B; port++) {
		/* 2. Enable DDI combo PHY comp. */
		val = I915_READ(ICL_PHY_MISC(port));
		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
		I915_WRITE(ICL_PHY_MISC(port), val);

		cnl_set_procmon_ref_values(dev_priv, port);

		val = I915_READ(ICL_PORT_COMP_DW0(port));
		val |= COMP_INIT;
		I915_WRITE(ICL_PORT_COMP_DW0(port), val);

		/* 3. Set power down enable. */
		val = I915_READ(ICL_PORT_CL_DW5(port));
		val |= CL_POWER_DOWN_ENABLE;
		I915_WRITE(ICL_PORT_CL_DW5(port), val);
	}

	/* 4. Enable power well 1 (PG1) and aux IO power. */
	/* FIXME: ICL power wells code not here yet. */

	/* 5. Enable CDCLK. */
	icl_init_cdclk(dev_priv);

	/* 6. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 7. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 8. CHICKEN_DCPR_1 */
	I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
					CNL_DDI_CLOCK_REG_ACCESS_ON);
}
3072 
/* Power down the ICL display core; inverse of icl_display_core_init(). */
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	enum port port;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	icl_uninit_cdclk(dev_priv);

	/* 4. Disable Power Well 1 (PG1) and Aux IO Power */
	/* FIXME: ICL power wells code not here yet. */

	/* 5. Disable Comp */
	for (port = PORT_A; port <= PORT_B; port++) {
		val = I915_READ(ICL_PHY_MISC(port));
		val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
		I915_WRITE(ICL_PHY_MISC(port), val);
	}
}
3098 
/*
 * Reconstruct the initial value of the DISPLAY_PHY_CONTROL shadow copy
 * from the current power well and lane status, then write it out. The
 * register itself must never be read (see the workaround note below).
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
3185 
/*
 * VLV common lane workaround: if the display isn't already fully active,
 * toggle the PHY side reset by cycling the common lane power well (see
 * the DPIO driver notes referenced below).
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
3213 
3214 /**
3215  * intel_power_domains_init_hw - initialize hardware power domain state
3216  * @dev_priv: i915 device instance
3217  * @resume: Called from resume code paths or not
3218  *
3219  * This function initializes the hardware power domain state and enables all
3220  * power wells belonging to the INIT power domain. Power wells in other
3221  * domains (and not in the INIT domain) are referenced or disabled during the
3222  * modeset state HW readout. After that the reference count of each power well
3223  * must match its HW enabled state, see intel_power_domains_verify_state().
3224  */
3225 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
3226 {
3227 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3228 
3229 	power_domains->initializing = true;
3230 
3231 	if (IS_ICELAKE(dev_priv)) {
3232 		icl_display_core_init(dev_priv, resume);
3233 	} else if (IS_CANNONLAKE(dev_priv)) {
3234 		cnl_display_core_init(dev_priv, resume);
3235 	} else if (IS_GEN9_BC(dev_priv)) {
3236 		skl_display_core_init(dev_priv, resume);
3237 	} else if (IS_GEN9_LP(dev_priv)) {
3238 		bxt_display_core_init(dev_priv, resume);
3239 	} else if (IS_CHERRYVIEW(dev_priv)) {
3240 		mutex_lock(&power_domains->lock);
3241 		chv_phy_control_init(dev_priv);
3242 		mutex_unlock(&power_domains->lock);
3243 	} else if (IS_VALLEYVIEW(dev_priv)) {
3244 		mutex_lock(&power_domains->lock);
3245 		vlv_cmnlane_wa(dev_priv);
3246 		mutex_unlock(&power_domains->lock);
3247 	}
3248 
3249 	/* For now, we need the power well to be always enabled. */
3250 	intel_display_set_init_power(dev_priv, true);
3251 	/* Disable power support if the user asked so. */
3252 	if (!i915_modparams.disable_power_well)
3253 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3254 	intel_power_domains_sync_hw(dev_priv);
3255 	power_domains->initializing = false;
3256 }
3257 
3258 /**
3259  * intel_power_domains_suspend - suspend power domain state
3260  * @dev_priv: i915 device instance
3261  *
3262  * This function prepares the hardware power domain state before entering
3263  * system suspend. It must be paired with intel_power_domains_init_hw().
3264  */
3265 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
3266 {
3267 	/*
3268 	 * Even if power well support was disabled we still want to disable
3269 	 * power wells while we are system suspended.
3270 	 */
3271 	if (!i915_modparams.disable_power_well)
3272 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3273 
3274 	if (IS_ICELAKE(dev_priv))
3275 		icl_display_core_uninit(dev_priv);
3276 	else if (IS_CANNONLAKE(dev_priv))
3277 		cnl_display_core_uninit(dev_priv);
3278 	else if (IS_GEN9_BC(dev_priv))
3279 		skl_display_core_uninit(dev_priv);
3280 	else if (IS_GEN9_LP(dev_priv))
3281 		bxt_display_core_uninit(dev_priv);
3282 }
3283 
3284 static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
3285 {
3286 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3287 	struct i915_power_well *power_well;
3288 
3289 	for_each_power_well(dev_priv, power_well) {
3290 		enum intel_display_power_domain domain;
3291 
3292 		DRM_DEBUG_DRIVER("%-25s %d\n",
3293 				 power_well->name, power_well->count);
3294 
3295 		for_each_power_domain(domain, power_well->domains)
3296 			DRM_DEBUG_DRIVER("  %-23s %d\n",
3297 					 intel_display_power_domain_str(domain),
3298 					 power_domains->domain_use_count[domain]);
3299 	}
3300 }
3301 
3302 /**
3303  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
3304  * @dev_priv: i915 device instance
3305  *
3306  * Verify if the reference count of each power well matches its HW enabled
3307  * state and the total refcount of the domains it belongs to. This must be
3308  * called after modeset HW state sanitization, which is responsible for
3309  * acquiring reference counts for any power wells in use and disabling the
3310  * ones left on by BIOS but not required by any active output.
3311  */
3312 void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
3313 {
3314 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3315 	struct i915_power_well *power_well;
3316 	bool dump_domain_info;
3317 
3318 	mutex_lock(&power_domains->lock);
3319 
3320 	dump_domain_info = false;
3321 	for_each_power_well(dev_priv, power_well) {
3322 		enum intel_display_power_domain domain;
3323 		int domains_count;
3324 		bool enabled;
3325 
3326 		/*
3327 		 * Power wells not belonging to any domain (like the MISC_IO
3328 		 * and PW1 power wells) are under FW control, so ignore them,
3329 		 * since their state can change asynchronously.
3330 		 */
3331 		if (!power_well->domains)
3332 			continue;
3333 
3334 		enabled = power_well->ops->is_enabled(dev_priv, power_well);
3335 		if ((power_well->count || power_well->always_on) != enabled)
3336 			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
3337 				  power_well->name, power_well->count, enabled);
3338 
3339 		domains_count = 0;
3340 		for_each_power_domain(domain, power_well->domains)
3341 			domains_count += power_domains->domain_use_count[domain];
3342 
3343 		if (power_well->count != domains_count) {
3344 			DRM_ERROR("power well %s refcount/domain refcount mismatch "
3345 				  "(refcount %d/domains refcount %d)\n",
3346 				  power_well->name, power_well->count,
3347 				  domains_count);
3348 			dump_domain_info = true;
3349 		}
3350 	}
3351 
3352 	if (dump_domain_info) {
3353 		static bool dumped;
3354 
3355 		if (!dumped) {
3356 			intel_power_domains_dump_info(dev_priv);
3357 			dumped = true;
3358 		}
3359 	}
3360 
3361 	mutex_unlock(&power_domains->lock);
3362 }
3363 
3364 /**
3365  * intel_runtime_pm_get - grab a runtime pm reference
3366  * @dev_priv: i915 device instance
3367  *
3368  * This function grabs a device-level runtime pm reference (mostly used for GEM
3369  * code to ensure the GTT or GT is on) and ensures that it is powered up.
3370  *
3371  * Any runtime pm reference obtained by this function must have a symmetric
3372  * call to intel_runtime_pm_put() to release the reference again.
3373  */
3374 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
3375 {
3376 	struct pci_dev *pdev = dev_priv->drm.pdev;
3377 	struct device *kdev = &pdev->dev;
3378 	int ret;
3379 
3380 	ret = pm_runtime_get_sync(kdev);
3381 	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3382 
3383 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3384 	assert_rpm_wakelock_held(dev_priv);
3385 }
3386 
3387 /**
3388  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
3389  * @dev_priv: i915 device instance
3390  *
3391  * This function grabs a device-level runtime pm reference if the device is
3392  * already in use and ensures that it is powered up. It is illegal to try
3393  * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
3394  *
3395  * Any runtime pm reference obtained by this function must have a symmetric
3396  * call to intel_runtime_pm_put() to release the reference again.
3397  *
3398  * Returns: True if the wakeref was acquired, or False otherwise.
3399  */
3400 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
3401 {
3402 	if (IS_ENABLED(CONFIG_PM)) {
3403 		struct pci_dev *pdev = dev_priv->drm.pdev;
3404 		struct device *kdev = &pdev->dev;
3405 
3406 		/*
3407 		 * In cases runtime PM is disabled by the RPM core and we get
3408 		 * an -EINVAL return value we are not supposed to call this
3409 		 * function, since the power state is undefined. This applies
3410 		 * atm to the late/early system suspend/resume handlers.
3411 		 */
3412 		if (pm_runtime_get_if_in_use(kdev) <= 0)
3413 			return false;
3414 	}
3415 
3416 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3417 	assert_rpm_wakelock_held(dev_priv);
3418 
3419 	return true;
3420 }
3421 
3422 /**
3423  * intel_runtime_pm_get_noresume - grab a runtime pm reference
3424  * @dev_priv: i915 device instance
3425  *
3426  * This function grabs a device-level runtime pm reference (mostly used for GEM
3427  * code to ensure the GTT or GT is on).
3428  *
3429  * It will _not_ power up the device but instead only check that it's powered
3430  * on.  Therefore it is only valid to call this functions from contexts where
3431  * the device is known to be powered up and where trying to power it up would
3432  * result in hilarity and deadlocks. That pretty much means only the system
3433  * suspend/resume code where this is used to grab runtime pm references for
3434  * delayed setup down in work items.
3435  *
3436  * Any runtime pm reference obtained by this function must have a symmetric
3437  * call to intel_runtime_pm_put() to release the reference again.
3438  */
3439 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
3440 {
3441 	struct pci_dev *pdev = dev_priv->drm.pdev;
3442 	struct device *kdev = &pdev->dev;
3443 
3444 	assert_rpm_wakelock_held(dev_priv);
3445 	pm_runtime_get_noresume(kdev);
3446 
3447 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3448 }
3449 
3450 /**
3451  * intel_runtime_pm_put - release a runtime pm reference
3452  * @dev_priv: i915 device instance
3453  *
3454  * This function drops the device-level runtime pm reference obtained by
3455  * intel_runtime_pm_get() and might power down the corresponding
3456  * hardware block right away if this is the last reference.
3457  */
3458 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
3459 {
3460 	struct pci_dev *pdev = dev_priv->drm.pdev;
3461 	struct device *kdev = &pdev->dev;
3462 
3463 	assert_rpm_wakelock_held(dev_priv);
3464 	atomic_dec(&dev_priv->runtime_pm.wakeref_count);
3465 
3466 	pm_runtime_mark_last_busy(kdev);
3467 	pm_runtime_put_autosuspend(kdev);
3468 }
3469 
3470 /**
3471  * intel_runtime_pm_enable - enable runtime pm
3472  * @dev_priv: i915 device instance
3473  *
3474  * This function enables runtime pm at the end of the driver load sequence.
3475  *
3476  * Note that this function does currently not enable runtime pm for the
3477  * subordinate display power domains. That is only done on the first modeset
3478  * using intel_display_set_init_power().
3479  */
3480 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
3481 {
3482 	struct pci_dev *pdev = dev_priv->drm.pdev;
3483 	struct device *kdev = &pdev->dev;
3484 
3485 	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
3486 	pm_runtime_mark_last_busy(kdev);
3487 
3488 	/*
3489 	 * Take a permanent reference to disable the RPM functionality and drop
3490 	 * it only when unloading the driver. Use the low level get/put helpers,
3491 	 * so the driver's own RPM reference tracking asserts also work on
3492 	 * platforms without RPM support.
3493 	 */
3494 	if (!HAS_RUNTIME_PM(dev_priv)) {
3495 		int ret;
3496 
3497 		pm_runtime_dont_use_autosuspend(kdev);
3498 		ret = pm_runtime_get_sync(kdev);
3499 		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3500 	} else {
3501 		pm_runtime_use_autosuspend(kdev);
3502 	}
3503 
3504 	/*
3505 	 * The core calls the driver load handler with an RPM reference held.
3506 	 * We drop that here and will reacquire it during unloading in
3507 	 * intel_power_domains_fini().
3508 	 */
3509 	pm_runtime_put_autosuspend(kdev);
3510 }
3511