1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28 
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31 
32 #include <drm/drm_print.h>
33 
34 #include "i915_drv.h"
35 #include "intel_cdclk.h"
36 #include "intel_crt.h"
37 #include "intel_csr.h"
38 #include "intel_dp.h"
39 #include "intel_drv.h"
40 
41 /**
42  * DOC: runtime pm
43  *
44  * The i915 driver supports dynamic enabling and disabling of entire hardware
45  * blocks at runtime. This is especially important on the display side where
46  * software is supposed to control many power gates manually on recent hardware,
47  * since on the GT side a lot of the power management is done by the hardware.
48  * But even there some manual control at the device level is required.
49  *
50  * Since i915 supports a diverse set of platforms with a unified codebase and
51  * hardware engineers just love to shuffle functionality around between power
52  * domains there's a sizeable amount of indirection required. This file provides
53  * generic functions to the driver for grabbing and releasing references for
54  * abstract power domains. It then maps those to the actual power wells
55  * present for a given platform.
56  */
57 
58 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
59 
60 #include <linux/sort.h>
61 
62 #define STACKDEPTH 8
63 
64 static noinline depot_stack_handle_t __save_depot_stack(void)
65 {
66 	unsigned long entries[STACKDEPTH];
67 	unsigned int n;
68 
69 	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
70 	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
71 }
72 
73 static void __print_depot_stack(depot_stack_handle_t stack,
74 				char *buf, int sz, int indent)
75 {
76 	unsigned long *entries;
77 	unsigned int nr_entries;
78 
79 	nr_entries = stack_depot_fetch(stack, &entries);
80 	stack_trace_snprint(buf, sz, entries, nr_entries, indent);
81 }
82 
83 static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
84 {
85 	struct i915_runtime_pm *rpm = &i915->runtime_pm;
86 
87 	spin_lock_init(&rpm->debug.lock);
88 }
89 
90 static noinline depot_stack_handle_t
91 track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
92 {
93 	struct i915_runtime_pm *rpm = &i915->runtime_pm;
94 	depot_stack_handle_t stack, *stacks;
95 	unsigned long flags;
96 
97 	atomic_inc(&rpm->wakeref_count);
98 	assert_rpm_wakelock_held(i915);
99 
100 	if (!HAS_RUNTIME_PM(i915))
101 		return -1;
102 
103 	stack = __save_depot_stack();
104 	if (!stack)
105 		return -1;
106 
107 	spin_lock_irqsave(&rpm->debug.lock, flags);
108 
109 	if (!rpm->debug.count)
110 		rpm->debug.last_acquire = stack;
111 
112 	stacks = krealloc(rpm->debug.owners,
113 			  (rpm->debug.count + 1) * sizeof(*stacks),
114 			  GFP_NOWAIT | __GFP_NOWARN);
115 	if (stacks) {
116 		stacks[rpm->debug.count++] = stack;
117 		rpm->debug.owners = stacks;
118 	} else {
119 		stack = -1;
120 	}
121 
122 	spin_unlock_irqrestore(&rpm->debug.lock, flags);
123 
124 	return stack;
125 }
126 
127 static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
128 					    depot_stack_handle_t stack)
129 {
130 	struct i915_runtime_pm *rpm = &i915->runtime_pm;
131 	unsigned long flags, n;
132 	bool found = false;
133 
134 	if (unlikely(stack == -1))
135 		return;
136 
137 	spin_lock_irqsave(&rpm->debug.lock, flags);
138 	for (n = rpm->debug.count; n--; ) {
139 		if (rpm->debug.owners[n] == stack) {
140 			memmove(rpm->debug.owners + n,
141 				rpm->debug.owners + n + 1,
142 				(--rpm->debug.count - n) * sizeof(stack));
143 			found = true;
144 			break;
145 		}
146 	}
147 	spin_unlock_irqrestore(&rpm->debug.lock, flags);
148 
149 	if (WARN(!found,
150 		 "Unmatched wakeref (tracking %lu), count %u\n",
151 		 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
152 		char *buf;
153 
154 		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
155 		if (!buf)
156 			return;
157 
158 		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
159 		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
160 
161 		stack = READ_ONCE(rpm->debug.last_release);
162 		if (stack) {
163 			__print_depot_stack(stack, buf, PAGE_SIZE, 2);
164 			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
165 		}
166 
167 		kfree(buf);
168 	}
169 }
170 
171 static int cmphandle(const void *_a, const void *_b)
172 {
173 	const depot_stack_handle_t * const a = _a, * const b = _b;
174 
175 	if (*a < *b)
176 		return -1;
177 	else if (*a > *b)
178 		return 1;
179 	else
180 		return 0;
181 }
182 
183 static void
184 __print_intel_runtime_pm_wakeref(struct drm_printer *p,
185 				 const struct intel_runtime_pm_debug *dbg)
186 {
187 	unsigned long i;
188 	char *buf;
189 
190 	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
191 	if (!buf)
192 		return;
193 
194 	if (dbg->last_acquire) {
195 		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
196 		drm_printf(p, "Wakeref last acquired:\n%s", buf);
197 	}
198 
199 	if (dbg->last_release) {
200 		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
201 		drm_printf(p, "Wakeref last released:\n%s", buf);
202 	}
203 
204 	drm_printf(p, "Wakeref count: %lu\n", dbg->count);
205 
206 	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
207 
208 	for (i = 0; i < dbg->count; i++) {
209 		depot_stack_handle_t stack = dbg->owners[i];
210 		unsigned long rep;
211 
212 		rep = 1;
213 		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
214 			rep++, i++;
215 		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
216 		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
217 	}
218 
219 	kfree(buf);
220 }
221 
222 static noinline void
223 untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
224 {
225 	struct i915_runtime_pm *rpm = &i915->runtime_pm;
226 	struct intel_runtime_pm_debug dbg = {};
227 	struct drm_printer p;
228 	unsigned long flags;
229 
230 	assert_rpm_wakelock_held(i915);
231 	if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
232 					&rpm->debug.lock,
233 					flags)) {
234 		dbg = rpm->debug;
235 
236 		rpm->debug.owners = NULL;
237 		rpm->debug.count = 0;
238 		rpm->debug.last_release = __save_depot_stack();
239 
240 		spin_unlock_irqrestore(&rpm->debug.lock, flags);
241 	}
242 	if (!dbg.count)
243 		return;
244 
245 	p = drm_debug_printer("i915");
246 	__print_intel_runtime_pm_wakeref(&p, &dbg);
247 
248 	kfree(dbg.owners);
249 }
250 
251 void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
252 				    struct drm_printer *p)
253 {
254 	struct intel_runtime_pm_debug dbg = {};
255 
256 	do {
257 		struct i915_runtime_pm *rpm = &i915->runtime_pm;
258 		unsigned long alloc = dbg.count;
259 		depot_stack_handle_t *s;
260 
261 		spin_lock_irq(&rpm->debug.lock);
262 		dbg.count = rpm->debug.count;
263 		if (dbg.count <= alloc) {
264 			memcpy(dbg.owners,
265 			       rpm->debug.owners,
266 			       dbg.count * sizeof(*s));
267 		}
268 		dbg.last_acquire = rpm->debug.last_acquire;
269 		dbg.last_release = rpm->debug.last_release;
270 		spin_unlock_irq(&rpm->debug.lock);
271 		if (dbg.count <= alloc)
272 			break;
273 
274 		s = krealloc(dbg.owners,
275 			     dbg.count * sizeof(*s),
276 			     GFP_NOWAIT | __GFP_NOWARN);
277 		if (!s)
278 			goto out;
279 
280 		dbg.owners = s;
281 	} while (1);
282 
283 	__print_intel_runtime_pm_wakeref(p, &dbg);
284 
285 out:
286 	kfree(dbg.owners);
287 }
288 
289 #else
290 
291 static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
292 {
293 }
294 
295 static depot_stack_handle_t
296 track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
297 {
298 	atomic_inc(&i915->runtime_pm.wakeref_count);
299 	assert_rpm_wakelock_held(i915);
300 	return -1;
301 }
302 
303 static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
304 {
305 	assert_rpm_wakelock_held(i915);
306 	atomic_dec(&i915->runtime_pm.wakeref_count);
307 }
308 
309 #endif
310 
311 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
312 					 enum i915_power_well_id power_well_id);
313 
314 const char *
315 intel_display_power_domain_str(enum intel_display_power_domain domain)
316 {
317 	switch (domain) {
318 	case POWER_DOMAIN_PIPE_A:
319 		return "PIPE_A";
320 	case POWER_DOMAIN_PIPE_B:
321 		return "PIPE_B";
322 	case POWER_DOMAIN_PIPE_C:
323 		return "PIPE_C";
324 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
325 		return "PIPE_A_PANEL_FITTER";
326 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
327 		return "PIPE_B_PANEL_FITTER";
328 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
329 		return "PIPE_C_PANEL_FITTER";
330 	case POWER_DOMAIN_TRANSCODER_A:
331 		return "TRANSCODER_A";
332 	case POWER_DOMAIN_TRANSCODER_B:
333 		return "TRANSCODER_B";
334 	case POWER_DOMAIN_TRANSCODER_C:
335 		return "TRANSCODER_C";
336 	case POWER_DOMAIN_TRANSCODER_EDP:
337 		return "TRANSCODER_EDP";
338 	case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
339 		return "TRANSCODER_EDP_VDSC";
340 	case POWER_DOMAIN_TRANSCODER_DSI_A:
341 		return "TRANSCODER_DSI_A";
342 	case POWER_DOMAIN_TRANSCODER_DSI_C:
343 		return "TRANSCODER_DSI_C";
344 	case POWER_DOMAIN_PORT_DDI_A_LANES:
345 		return "PORT_DDI_A_LANES";
346 	case POWER_DOMAIN_PORT_DDI_B_LANES:
347 		return "PORT_DDI_B_LANES";
348 	case POWER_DOMAIN_PORT_DDI_C_LANES:
349 		return "PORT_DDI_C_LANES";
350 	case POWER_DOMAIN_PORT_DDI_D_LANES:
351 		return "PORT_DDI_D_LANES";
352 	case POWER_DOMAIN_PORT_DDI_E_LANES:
353 		return "PORT_DDI_E_LANES";
354 	case POWER_DOMAIN_PORT_DDI_F_LANES:
355 		return "PORT_DDI_F_LANES";
356 	case POWER_DOMAIN_PORT_DDI_A_IO:
357 		return "PORT_DDI_A_IO";
358 	case POWER_DOMAIN_PORT_DDI_B_IO:
359 		return "PORT_DDI_B_IO";
360 	case POWER_DOMAIN_PORT_DDI_C_IO:
361 		return "PORT_DDI_C_IO";
362 	case POWER_DOMAIN_PORT_DDI_D_IO:
363 		return "PORT_DDI_D_IO";
364 	case POWER_DOMAIN_PORT_DDI_E_IO:
365 		return "PORT_DDI_E_IO";
366 	case POWER_DOMAIN_PORT_DDI_F_IO:
367 		return "PORT_DDI_F_IO";
368 	case POWER_DOMAIN_PORT_DSI:
369 		return "PORT_DSI";
370 	case POWER_DOMAIN_PORT_CRT:
371 		return "PORT_CRT";
372 	case POWER_DOMAIN_PORT_OTHER:
373 		return "PORT_OTHER";
374 	case POWER_DOMAIN_VGA:
375 		return "VGA";
376 	case POWER_DOMAIN_AUDIO:
377 		return "AUDIO";
378 	case POWER_DOMAIN_PLLS:
379 		return "PLLS";
380 	case POWER_DOMAIN_AUX_A:
381 		return "AUX_A";
382 	case POWER_DOMAIN_AUX_B:
383 		return "AUX_B";
384 	case POWER_DOMAIN_AUX_C:
385 		return "AUX_C";
386 	case POWER_DOMAIN_AUX_D:
387 		return "AUX_D";
388 	case POWER_DOMAIN_AUX_E:
389 		return "AUX_E";
390 	case POWER_DOMAIN_AUX_F:
391 		return "AUX_F";
392 	case POWER_DOMAIN_AUX_IO_A:
393 		return "AUX_IO_A";
394 	case POWER_DOMAIN_AUX_TBT1:
395 		return "AUX_TBT1";
396 	case POWER_DOMAIN_AUX_TBT2:
397 		return "AUX_TBT2";
398 	case POWER_DOMAIN_AUX_TBT3:
399 		return "AUX_TBT3";
400 	case POWER_DOMAIN_AUX_TBT4:
401 		return "AUX_TBT4";
402 	case POWER_DOMAIN_GMBUS:
403 		return "GMBUS";
404 	case POWER_DOMAIN_INIT:
405 		return "INIT";
406 	case POWER_DOMAIN_MODESET:
407 		return "MODESET";
408 	case POWER_DOMAIN_GT_IRQ:
409 		return "GT_IRQ";
410 	default:
411 		MISSING_CASE(domain);
412 		return "?";
413 	}
414 }
415 
416 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
417 				    struct i915_power_well *power_well)
418 {
419 	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
420 	power_well->desc->ops->enable(dev_priv, power_well);
421 	power_well->hw_enabled = true;
422 }
423 
424 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
425 				     struct i915_power_well *power_well)
426 {
427 	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
428 	power_well->hw_enabled = false;
429 	power_well->desc->ops->disable(dev_priv, power_well);
430 }
431 
432 static void intel_power_well_get(struct drm_i915_private *dev_priv,
433 				 struct i915_power_well *power_well)
434 {
435 	if (!power_well->count++)
436 		intel_power_well_enable(dev_priv, power_well);
437 }
438 
439 static void intel_power_well_put(struct drm_i915_private *dev_priv,
440 				 struct i915_power_well *power_well)
441 {
442 	WARN(!power_well->count, "Use count on power well %s is already zero",
443 	     power_well->desc->name);
444 
445 	if (!--power_well->count)
446 		intel_power_well_disable(dev_priv, power_well);
447 }
448 
449 /**
450  * __intel_display_power_is_enabled - unlocked check for a power domain
451  * @dev_priv: i915 device instance
452  * @domain: power domain to check
453  *
454  * This is the unlocked version of intel_display_power_is_enabled() and should
455  * only be used from error capture and recovery code where deadlocks are
456  * possible.
457  *
458  * Returns:
459  * True when the power domain is enabled, false otherwise.
460  */
461 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
462 				      enum intel_display_power_domain domain)
463 {
464 	struct i915_power_well *power_well;
465 	bool is_enabled;
466 
467 	if (dev_priv->runtime_pm.suspended)
468 		return false;
469 
470 	is_enabled = true;
471 
472 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
473 		if (power_well->desc->always_on)
474 			continue;
475 
476 		if (!power_well->hw_enabled) {
477 			is_enabled = false;
478 			break;
479 		}
480 	}
481 
482 	return is_enabled;
483 }
484 
485 /**
486  * intel_display_power_is_enabled - check for a power domain
487  * @dev_priv: i915 device instance
488  * @domain: power domain to check
489  *
490  * This function can be used to check the hw power domain state. It is mostly
491  * used in hardware state readout functions. Everywhere else code should rely
492  * upon explicit power domain reference counting to ensure that the hardware
493  * block is powered up before accessing it.
494  *
495  * Callers must hold the relevant modesetting locks to ensure that concurrent
496  * threads can't disable the power well while the caller tries to read a few
497  * registers.
498  *
499  * Returns:
500  * True when the power domain is enabled, false otherwise.
501  */
502 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
503 				    enum intel_display_power_domain domain)
504 {
505 	struct i915_power_domains *power_domains;
506 	bool ret;
507 
508 	power_domains = &dev_priv->power_domains;
509 
510 	mutex_lock(&power_domains->lock);
511 	ret = __intel_display_power_is_enabled(dev_priv, domain);
512 	mutex_unlock(&power_domains->lock);
513 
514 	return ret;
515 }
516 
517 /*
518  * Starting with Haswell, we have a "Power Down Well" that can be turned off
519  * when not needed anymore. We have 4 registers that can request the power well
520  * to be enabled, and it will only be disabled if none of the registers is
521  * requesting it to be enabled.
522  */
523 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
524 				       u8 irq_pipe_mask, bool has_vga)
525 {
526 	struct pci_dev *pdev = dev_priv->drm.pdev;
527 
528 	/*
529 	 * After we re-enable the power well, if we touch VGA register 0x3d5
530 	 * we'll get unclaimed register interrupts. This stops after we write
531 	 * anything to the VGA MSR register. The vgacon module uses this
532 	 * register all the time, so if we unbind our driver and, as a
533 	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
534 	 * console_unlock(). So make here we touch the VGA MSR register, making
535 	 * sure vgacon can keep working normally without triggering interrupts
536 	 * and error messages.
537 	 */
538 	if (has_vga) {
539 		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
540 		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
541 		vga_put(pdev, VGA_RSRC_LEGACY_IO);
542 	}
543 
544 	if (irq_pipe_mask)
545 		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
546 }
547 
548 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
549 				       u8 irq_pipe_mask)
550 {
551 	if (irq_pipe_mask)
552 		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
553 }
554 
555 
556 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
557 					   struct i915_power_well *power_well)
558 {
559 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
560 	int pw_idx = power_well->desc->hsw.idx;
561 
562 	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
563 	WARN_ON(intel_wait_for_register(&dev_priv->uncore,
564 					regs->driver,
565 					HSW_PWR_WELL_CTL_STATE(pw_idx),
566 					HSW_PWR_WELL_CTL_STATE(pw_idx),
567 					1));
568 }
569 
570 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
571 				     const struct i915_power_well_regs *regs,
572 				     int pw_idx)
573 {
574 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
575 	u32 ret;
576 
577 	ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
578 	ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
579 	if (regs->kvmr.reg)
580 		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
581 	ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
582 
583 	return ret;
584 }
585 
586 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
587 					    struct i915_power_well *power_well)
588 {
589 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
590 	int pw_idx = power_well->desc->hsw.idx;
591 	bool disabled;
592 	u32 reqs;
593 
594 	/*
595 	 * Bspec doesn't require waiting for PWs to get disabled, but still do
596 	 * this for paranoia. The known cases where a PW will be forced on:
597 	 * - a KVMR request on any power well via the KVMR request register
598 	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
599 	 *   DEBUG request registers
600 	 * Skip the wait in case any of the request bits are set and print a
601 	 * diagnostic message.
602 	 */
603 	wait_for((disabled = !(I915_READ(regs->driver) &
604 			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
605 		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
606 	if (disabled)
607 		return;
608 
609 	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
610 		      power_well->desc->name,
611 		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
612 }
613 
614 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
615 					   enum skl_power_gate pg)
616 {
617 	/* Timeout 5us for PG#0, for other PGs 1us */
618 	WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
619 					SKL_FUSE_PG_DIST_STATUS(pg),
620 					SKL_FUSE_PG_DIST_STATUS(pg), 1));
621 }
622 
623 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
624 				  struct i915_power_well *power_well)
625 {
626 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
627 	int pw_idx = power_well->desc->hsw.idx;
628 	bool wait_fuses = power_well->desc->hsw.has_fuses;
629 	enum skl_power_gate uninitialized_var(pg);
630 	u32 val;
631 
632 	if (wait_fuses) {
633 		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
634 						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
635 		/*
636 		 * For PW1 we have to wait both for the PW0/PG0 fuse state
637 		 * before enabling the power well and PW1/PG1's own fuse
638 		 * state after the enabling. For all other power wells with
639 		 * fuses we only have to wait for that PW/PG's fuse state
640 		 * after the enabling.
641 		 */
642 		if (pg == SKL_PG1)
643 			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
644 	}
645 
646 	val = I915_READ(regs->driver);
647 	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
648 	hsw_wait_for_power_well_enable(dev_priv, power_well);
649 
650 	/* Display WA #1178: cnl */
651 	if (IS_CANNONLAKE(dev_priv) &&
652 	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
653 	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
654 		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
655 		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
656 		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
657 	}
658 
659 	if (wait_fuses)
660 		gen9_wait_for_power_well_fuses(dev_priv, pg);
661 
662 	hsw_power_well_post_enable(dev_priv,
663 				   power_well->desc->hsw.irq_pipe_mask,
664 				   power_well->desc->hsw.has_vga);
665 }
666 
667 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
668 				   struct i915_power_well *power_well)
669 {
670 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
671 	int pw_idx = power_well->desc->hsw.idx;
672 	u32 val;
673 
674 	hsw_power_well_pre_disable(dev_priv,
675 				   power_well->desc->hsw.irq_pipe_mask);
676 
677 	val = I915_READ(regs->driver);
678 	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
679 	hsw_wait_for_power_well_disable(dev_priv, power_well);
680 }
681 
682 #define ICL_AUX_PW_TO_PORT(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
683 
684 static void
685 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
686 				    struct i915_power_well *power_well)
687 {
688 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
689 	int pw_idx = power_well->desc->hsw.idx;
690 	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
691 	u32 val;
692 
693 	val = I915_READ(regs->driver);
694 	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
695 
696 	val = I915_READ(ICL_PORT_CL_DW12(port));
697 	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
698 
699 	hsw_wait_for_power_well_enable(dev_priv, power_well);
700 
701 	/* Display WA #1178: icl */
702 	if (IS_ICELAKE(dev_priv) &&
703 	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
704 	    !intel_bios_is_port_edp(dev_priv, port)) {
705 		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
706 		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
707 		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
708 	}
709 }
710 
711 static void
712 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
713 				     struct i915_power_well *power_well)
714 {
715 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
716 	int pw_idx = power_well->desc->hsw.idx;
717 	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
718 	u32 val;
719 
720 	val = I915_READ(ICL_PORT_CL_DW12(port));
721 	I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
722 
723 	val = I915_READ(regs->driver);
724 	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
725 
726 	hsw_wait_for_power_well_disable(dev_priv, power_well);
727 }
728 
729 #define ICL_AUX_PW_TO_CH(pw_idx)	\
730 	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
731 
732 static void
733 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
734 				 struct i915_power_well *power_well)
735 {
736 	enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
737 	u32 val;
738 
739 	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
740 	val &= ~DP_AUX_CH_CTL_TBT_IO;
741 	if (power_well->desc->hsw.is_tc_tbt)
742 		val |= DP_AUX_CH_CTL_TBT_IO;
743 	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
744 
745 	hsw_power_well_enable(dev_priv, power_well);
746 }
747 
748 /*
749  * We should only use the power well if we explicitly asked the hardware to
750  * enable it, so check if it's enabled and also check if we've requested it to
751  * be enabled.
752  */
753 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
754 				   struct i915_power_well *power_well)
755 {
756 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
757 	enum i915_power_well_id id = power_well->desc->id;
758 	int pw_idx = power_well->desc->hsw.idx;
759 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
760 		   HSW_PWR_WELL_CTL_STATE(pw_idx);
761 	u32 val;
762 
763 	val = I915_READ(regs->driver);
764 
765 	/*
766 	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
767 	 * and the MISC_IO PW will be not restored, so check instead for the
768 	 * BIOS's own request bits, which are forced-on for these power wells
769 	 * when exiting DC5/6.
770 	 */
771 	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
772 	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
773 		val |= I915_READ(regs->bios);
774 
775 	return (val & mask) == mask;
776 }
777 
778 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
779 {
780 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
781 		  "DC9 already programmed to be enabled.\n");
782 	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
783 		  "DC5 still not disabled to enable DC9.\n");
784 	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
785 		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
786 		  "Power well 2 on.\n");
787 	WARN_ONCE(intel_irqs_enabled(dev_priv),
788 		  "Interrupts not disabled yet.\n");
789 
790 	 /*
791 	  * TODO: check for the following to verify the conditions to enter DC9
792 	  * state are satisfied:
793 	  * 1] Check relevant display engine registers to verify if mode set
794 	  * disable sequence was followed.
795 	  * 2] Check if display uninitialize sequence is initialized.
796 	  */
797 }
798 
799 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
800 {
801 	WARN_ONCE(intel_irqs_enabled(dev_priv),
802 		  "Interrupts not disabled yet.\n");
803 	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
804 		  "DC5 still not disabled.\n");
805 
806 	 /*
807 	  * TODO: check for the following to verify DC9 state was indeed
808 	  * entered before programming to disable it:
809 	  * 1] Check relevant display engine registers to verify if mode
810 	  *  set disable sequence was followed.
811 	  * 2] Check if display uninitialize sequence is initialized.
812 	  */
813 }
814 
815 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
816 				u32 state)
817 {
818 	int rewrites = 0;
819 	int rereads = 0;
820 	u32 v;
821 
822 	I915_WRITE(DC_STATE_EN, state);
823 
824 	/* It has been observed that disabling the dc6 state sometimes
825 	 * doesn't stick and dmc keeps returning old value. Make sure
826 	 * the write really sticks enough times and also force rewrite until
827 	 * we are confident that state is exactly what we want.
828 	 */
829 	do  {
830 		v = I915_READ(DC_STATE_EN);
831 
832 		if (v != state) {
833 			I915_WRITE(DC_STATE_EN, state);
834 			rewrites++;
835 			rereads = 0;
836 		} else if (rereads++ > 5) {
837 			break;
838 		}
839 
840 	} while (rewrites < 100);
841 
842 	if (v != state)
843 		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
844 			  state, v);
845 
846 	/* Most of the times we need one retry, avoid spam */
847 	if (rewrites > 1)
848 		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
849 			      state, rewrites);
850 }
851 
852 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
853 {
854 	u32 mask;
855 
856 	mask = DC_STATE_EN_UPTO_DC5;
857 	if (INTEL_GEN(dev_priv) >= 11)
858 		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
859 	else if (IS_GEN9_LP(dev_priv))
860 		mask |= DC_STATE_EN_DC9;
861 	else
862 		mask |= DC_STATE_EN_UPTO_DC6;
863 
864 	return mask;
865 }
866 
867 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
868 {
869 	u32 val;
870 
871 	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
872 
873 	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
874 		      dev_priv->csr.dc_state, val);
875 	dev_priv->csr.dc_state = val;
876 }
877 
878 /**
879  * gen9_set_dc_state - set target display C power state
880  * @dev_priv: i915 device instance
881  * @state: target DC power state
882  * - DC_STATE_DISABLE
883  * - DC_STATE_EN_UPTO_DC5
884  * - DC_STATE_EN_UPTO_DC6
885  * - DC_STATE_EN_DC9
886  *
887  * Signal to DMC firmware/HW the target DC power state passed in @state.
888  * DMC/HW can turn off individual display clocks and power rails when entering
889  * a deeper DC power state (higher in number) and turns these back when exiting
890  * that state to a shallower power state (lower in number). The HW will decide
891  * when to actually enter a given state on an on-demand basis, for instance
892  * depending on the active state of display pipes. The state of display
893  * registers backed by affected power rails are saved/restored as needed.
894  *
895  * Based on the above enabling a deeper DC power state is asynchronous wrt.
896  * enabling it. Disabling a deeper power state is synchronous: for instance
897  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
898  * back on and register state is restored. This is guaranteed by the MMIO write
899  * to DC_STATE_EN blocking until the state is restored.
900  */
901 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
902 {
903 	u32 val;
904 	u32 mask;
905 
906 	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
907 		state &= dev_priv->csr.allowed_dc_mask;
908 
909 	val = I915_READ(DC_STATE_EN);
910 	mask = gen9_dc_mask(dev_priv);
911 	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
912 		      val & mask, state);
913 
914 	/* Check if DMC is ignoring our DC state requests */
915 	if ((val & mask) != dev_priv->csr.dc_state)
916 		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
917 			  dev_priv->csr.dc_state, val & mask);
918 
919 	val &= ~mask;
920 	val |= state;
921 
922 	gen9_write_dc_state(dev_priv, val);
923 
924 	dev_priv->csr.dc_state = val & mask;
925 }
926 
927 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
928 {
929 	assert_can_enable_dc9(dev_priv);
930 
931 	DRM_DEBUG_KMS("Enabling DC9\n");
932 	/*
933 	 * Power sequencer reset is not needed on
934 	 * platforms with South Display Engine on PCH,
935 	 * because PPS registers are always on.
936 	 */
937 	if (!HAS_PCH_SPLIT(dev_priv))
938 		intel_power_sequencer_reset(dev_priv);
939 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
940 }
941 
942 void bxt_disable_dc9(struct drm_i915_private *dev_priv)
943 {
944 	assert_can_disable_dc9(dev_priv);
945 
946 	DRM_DEBUG_KMS("Disabling DC9\n");
947 
948 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
949 
950 	intel_pps_unlock_regs_wa(dev_priv);
951 }
952 
953 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
954 {
955 	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
956 		  "CSR program storage start is NULL\n");
957 	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
958 	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
959 }
960 
961 static struct i915_power_well *
962 lookup_power_well(struct drm_i915_private *dev_priv,
963 		  enum i915_power_well_id power_well_id)
964 {
965 	struct i915_power_well *power_well;
966 
967 	for_each_power_well(dev_priv, power_well)
968 		if (power_well->desc->id == power_well_id)
969 			return power_well;
970 
971 	/*
972 	 * It's not feasible to add error checking code to the callers since
973 	 * this condition really shouldn't happen and it doesn't even make sense
974 	 * to abort things like display initialization sequences. Just return
975 	 * the first power well and hope the WARN gets reported so we can fix
976 	 * our driver.
977 	 */
978 	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
979 	return &dev_priv->power_domains.power_wells[0];
980 }
981 
982 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
983 {
984 	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
985 					SKL_DISP_PW_2);
986 
987 	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
988 
989 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
990 		  "DC5 already programmed to be enabled.\n");
991 	assert_rpm_wakelock_held(dev_priv);
992 
993 	assert_csr_loaded(dev_priv);
994 }
995 
996 void gen9_enable_dc5(struct drm_i915_private *dev_priv)
997 {
998 	assert_can_enable_dc5(dev_priv);
999 
1000 	DRM_DEBUG_KMS("Enabling DC5\n");
1001 
1002 	/* Wa Display #1183: skl,kbl,cfl */
1003 	if (IS_GEN9_BC(dev_priv))
1004 		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
1005 			   SKL_SELECT_ALTERNATE_DC_EXIT);
1006 
1007 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1008 }
1009 
1010 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
1011 {
1012 	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1013 		  "Backlight is not disabled.\n");
1014 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
1015 		  "DC6 already programmed to be enabled.\n");
1016 
1017 	assert_csr_loaded(dev_priv);
1018 }
1019 
1020 void skl_enable_dc6(struct drm_i915_private *dev_priv)
1021 {
1022 	assert_can_enable_dc6(dev_priv);
1023 
1024 	DRM_DEBUG_KMS("Enabling DC6\n");
1025 
1026 	/* Wa Display #1183: skl,kbl,cfl */
1027 	if (IS_GEN9_BC(dev_priv))
1028 		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
1029 			   SKL_SELECT_ALTERNATE_DC_EXIT);
1030 
1031 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1032 }
1033 
1034 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1035 				   struct i915_power_well *power_well)
1036 {
1037 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1038 	int pw_idx = power_well->desc->hsw.idx;
1039 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1040 	u32 bios_req = I915_READ(regs->bios);
1041 
1042 	/* Take over the request bit if set by BIOS. */
1043 	if (bios_req & mask) {
1044 		u32 drv_req = I915_READ(regs->driver);
1045 
1046 		if (!(drv_req & mask))
1047 			I915_WRITE(regs->driver, drv_req | mask);
1048 		I915_WRITE(regs->bios, bios_req & ~mask);
1049 	}
1050 }
1051 
1052 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1053 					   struct i915_power_well *power_well)
1054 {
1055 	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1056 }
1057 
1058 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1059 					    struct i915_power_well *power_well)
1060 {
1061 	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1062 }
1063 
1064 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1065 					    struct i915_power_well *power_well)
1066 {
1067 	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1068 }
1069 
1070 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1071 {
1072 	struct i915_power_well *power_well;
1073 
1074 	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1075 	if (power_well->count > 0)
1076 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1077 
1078 	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1079 	if (power_well->count > 0)
1080 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1081 
1082 	if (IS_GEMINILAKE(dev_priv)) {
1083 		power_well = lookup_power_well(dev_priv,
1084 					       GLK_DISP_PW_DPIO_CMN_C);
1085 		if (power_well->count > 0)
1086 			bxt_ddi_phy_verify_state(dev_priv,
1087 						 power_well->desc->bxt.phy);
1088 	}
1089 }
1090 
1091 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1092 					   struct i915_power_well *power_well)
1093 {
1094 	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
1095 }
1096 
1097 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1098 {
1099 	u32 tmp = I915_READ(DBUF_CTL);
1100 
1101 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
1102 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
1103 	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
1104 }
1105 
1106 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1107 					  struct i915_power_well *power_well)
1108 {
1109 	struct intel_cdclk_state cdclk_state = {};
1110 
1111 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1112 
1113 	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
1114 	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
1115 	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
1116 
1117 	gen9_assert_dbuf_enabled(dev_priv);
1118 
1119 	if (IS_GEN9_LP(dev_priv))
1120 		bxt_verify_ddi_phy_power_wells(dev_priv);
1121 
1122 	if (INTEL_GEN(dev_priv) >= 11)
1123 		/*
1124 		 * DMC retains HW context only for port A, the other combo
1125 		 * PHY's HW context for port B is lost after DC transitions,
1126 		 * so we need to restore it manually.
1127 		 */
1128 		icl_combo_phys_init(dev_priv);
1129 }
1130 
1131 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1132 					   struct i915_power_well *power_well)
1133 {
1134 	if (!dev_priv->csr.dmc_payload)
1135 		return;
1136 
1137 	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
1138 		skl_enable_dc6(dev_priv);
1139 	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
1140 		gen9_enable_dc5(dev_priv);
1141 }
1142 
1143 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1144 					 struct i915_power_well *power_well)
1145 {
1146 }
1147 
1148 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1149 					   struct i915_power_well *power_well)
1150 {
1151 }
1152 
1153 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1154 					     struct i915_power_well *power_well)
1155 {
1156 	return true;
1157 }
1158 
1159 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1160 					 struct i915_power_well *power_well)
1161 {
1162 	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1163 		i830_enable_pipe(dev_priv, PIPE_A);
1164 	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1165 		i830_enable_pipe(dev_priv, PIPE_B);
1166 }
1167 
1168 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1169 					  struct i915_power_well *power_well)
1170 {
1171 	i830_disable_pipe(dev_priv, PIPE_B);
1172 	i830_disable_pipe(dev_priv, PIPE_A);
1173 }
1174 
1175 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1176 					  struct i915_power_well *power_well)
1177 {
1178 	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1179 		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1180 }
1181 
1182 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1183 					  struct i915_power_well *power_well)
1184 {
1185 	if (power_well->count > 0)
1186 		i830_pipes_power_well_enable(dev_priv, power_well);
1187 	else
1188 		i830_pipes_power_well_disable(dev_priv, power_well);
1189 }
1190 
1191 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1192 			       struct i915_power_well *power_well, bool enable)
1193 {
1194 	int pw_idx = power_well->desc->vlv.idx;
1195 	u32 mask;
1196 	u32 state;
1197 	u32 ctrl;
1198 
1199 	mask = PUNIT_PWRGT_MASK(pw_idx);
1200 	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1201 			 PUNIT_PWRGT_PWR_GATE(pw_idx);
1202 
1203 	mutex_lock(&dev_priv->pcu_lock);
1204 
1205 #define COND \
1206 	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1207 
1208 	if (COND)
1209 		goto out;
1210 
1211 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1212 	ctrl &= ~mask;
1213 	ctrl |= state;
1214 	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1215 
1216 	if (wait_for(COND, 100))
1217 		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1218 			  state,
1219 			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1220 
1221 #undef COND
1222 
1223 out:
1224 	mutex_unlock(&dev_priv->pcu_lock);
1225 }
1226 
1227 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1228 				  struct i915_power_well *power_well)
1229 {
1230 	vlv_set_power_well(dev_priv, power_well, true);
1231 }
1232 
1233 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1234 				   struct i915_power_well *power_well)
1235 {
1236 	vlv_set_power_well(dev_priv, power_well, false);
1237 }
1238 
1239 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1240 				   struct i915_power_well *power_well)
1241 {
1242 	int pw_idx = power_well->desc->vlv.idx;
1243 	bool enabled = false;
1244 	u32 mask;
1245 	u32 state;
1246 	u32 ctrl;
1247 
1248 	mask = PUNIT_PWRGT_MASK(pw_idx);
1249 	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1250 
1251 	mutex_lock(&dev_priv->pcu_lock);
1252 
1253 	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1254 	/*
1255 	 * We only ever set the power-on and power-gate states, anything
1256 	 * else is unexpected.
1257 	 */
1258 	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1259 		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1260 	if (state == ctrl)
1261 		enabled = true;
1262 
1263 	/*
1264 	 * A transient state at this point would mean some unexpected party
1265 	 * is poking at the power controls too.
1266 	 */
1267 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1268 	WARN_ON(ctrl != state);
1269 
1270 	mutex_unlock(&dev_priv->pcu_lock);
1271 
1272 	return enabled;
1273 }
1274 
1275 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1276 {
1277 	u32 val;
1278 
1279 	/*
1280 	 * On driver load, a pipe may be active and driving a DSI display.
1281 	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1282 	 * (and never recovering) in this case. intel_dsi_post_disable() will
1283 	 * clear it when we turn off the display.
1284 	 */
1285 	val = I915_READ(DSPCLK_GATE_D);
1286 	val &= DPOUNIT_CLOCK_GATE_DISABLE;
1287 	val |= VRHUNIT_CLOCK_GATE_DISABLE;
1288 	I915_WRITE(DSPCLK_GATE_D, val);
1289 
1290 	/*
1291 	 * Disable trickle feed and enable pnd deadline calculation
1292 	 */
1293 	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1294 	I915_WRITE(CBR1_VLV, 0);
1295 
1296 	WARN_ON(dev_priv->rawclk_freq == 0);
1297 
1298 	I915_WRITE(RAWCLK_FREQ_VLV,
1299 		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
1300 }
1301 
1302 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1303 {
1304 	struct intel_encoder *encoder;
1305 	enum pipe pipe;
1306 
1307 	/*
1308 	 * Enable the CRI clock source so we can get at the
1309 	 * display and the reference clock for VGA
1310 	 * hotplug / manual detection. Supposedly DSI also
1311 	 * needs the ref clock up and running.
1312 	 *
1313 	 * CHV DPLL B/C have some issues if VGA mode is enabled.
1314 	 */
1315 	for_each_pipe(dev_priv, pipe) {
1316 		u32 val = I915_READ(DPLL(pipe));
1317 
1318 		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1319 		if (pipe != PIPE_A)
1320 			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1321 
1322 		I915_WRITE(DPLL(pipe), val);
1323 	}
1324 
1325 	vlv_init_display_clock_gating(dev_priv);
1326 
1327 	spin_lock_irq(&dev_priv->irq_lock);
1328 	valleyview_enable_display_irqs(dev_priv);
1329 	spin_unlock_irq(&dev_priv->irq_lock);
1330 
1331 	/*
1332 	 * During driver initialization/resume we can avoid restoring the
1333 	 * part of the HW/SW state that will be inited anyway explicitly.
1334 	 */
1335 	if (dev_priv->power_domains.initializing)
1336 		return;
1337 
1338 	intel_hpd_init(dev_priv);
1339 
1340 	/* Re-enable the ADPA, if we have one */
1341 	for_each_intel_encoder(&dev_priv->drm, encoder) {
1342 		if (encoder->type == INTEL_OUTPUT_ANALOG)
1343 			intel_crt_reset(&encoder->base);
1344 	}
1345 
1346 	i915_redisable_vga_power_on(dev_priv);
1347 
1348 	intel_pps_unlock_regs_wa(dev_priv);
1349 }
1350 
1351 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1352 {
1353 	spin_lock_irq(&dev_priv->irq_lock);
1354 	valleyview_disable_display_irqs(dev_priv);
1355 	spin_unlock_irq(&dev_priv->irq_lock);
1356 
1357 	/* make sure we're done processing display irqs */
1358 	synchronize_irq(dev_priv->drm.irq);
1359 
1360 	intel_power_sequencer_reset(dev_priv);
1361 
1362 	/* Prevent us from re-enabling polling on accident in late suspend */
1363 	if (!dev_priv->drm.dev->power.is_suspended)
1364 		intel_hpd_poll_init(dev_priv);
1365 }
1366 
1367 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1368 					  struct i915_power_well *power_well)
1369 {
1370 	vlv_set_power_well(dev_priv, power_well, true);
1371 
1372 	vlv_display_power_well_init(dev_priv);
1373 }
1374 
1375 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1376 					   struct i915_power_well *power_well)
1377 {
1378 	vlv_display_power_well_deinit(dev_priv);
1379 
1380 	vlv_set_power_well(dev_priv, power_well, false);
1381 }
1382 
1383 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1384 					   struct i915_power_well *power_well)
1385 {
1386 	/* since ref/cri clock was enabled */
1387 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1388 
1389 	vlv_set_power_well(dev_priv, power_well, true);
1390 
1391 	/*
1392 	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1393 	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
1394 	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
1395 	 *   b.	The other bits such as sfr settings / modesel may all
1396 	 *	be set to 0.
1397 	 *
1398 	 * This should only be done on init and resume from S3 with
1399 	 * both PLLs disabled, or we risk losing DPIO and PLL
1400 	 * synchronization.
1401 	 */
1402 	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1403 }
1404 
1405 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1406 					    struct i915_power_well *power_well)
1407 {
1408 	enum pipe pipe;
1409 
1410 	for_each_pipe(dev_priv, pipe)
1411 		assert_pll_disabled(dev_priv, pipe);
1412 
1413 	/* Assert common reset */
1414 	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
1415 
1416 	vlv_set_power_well(dev_priv, power_well, false);
1417 }
1418 
1419 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1420 
1421 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1422 
1423 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1424 {
1425 	struct i915_power_well *cmn_bc =
1426 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1427 	struct i915_power_well *cmn_d =
1428 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1429 	u32 phy_control = dev_priv->chv_phy_control;
1430 	u32 phy_status = 0;
1431 	u32 phy_status_mask = 0xffffffff;
1432 
1433 	/*
1434 	 * The BIOS can leave the PHY is some weird state
1435 	 * where it doesn't fully power down some parts.
1436 	 * Disable the asserts until the PHY has been fully
1437 	 * reset (ie. the power well has been disabled at
1438 	 * least once).
1439 	 */
1440 	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1441 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1442 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1443 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1444 				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1445 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1446 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1447 
1448 	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1449 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1450 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1451 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1452 
1453 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1454 		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1455 
1456 		/* this assumes override is only used to enable lanes */
1457 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1458 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1459 
1460 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1461 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1462 
1463 		/* CL1 is on whenever anything is on in either channel */
1464 		if (BITS_SET(phy_control,
1465 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1466 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1467 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1468 
1469 		/*
1470 		 * The DPLLB check accounts for the pipe B + port A usage
1471 		 * with CL2 powered up but all the lanes in the second channel
1472 		 * powered down.
1473 		 */
1474 		if (BITS_SET(phy_control,
1475 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1476 		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1477 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1478 
1479 		if (BITS_SET(phy_control,
1480 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1481 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1482 		if (BITS_SET(phy_control,
1483 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1484 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1485 
1486 		if (BITS_SET(phy_control,
1487 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1488 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1489 		if (BITS_SET(phy_control,
1490 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1491 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1492 	}
1493 
1494 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1495 		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1496 
1497 		/* this assumes override is only used to enable lanes */
1498 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1499 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1500 
1501 		if (BITS_SET(phy_control,
1502 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1503 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1504 
1505 		if (BITS_SET(phy_control,
1506 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1507 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1508 		if (BITS_SET(phy_control,
1509 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1510 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1511 	}
1512 
1513 	phy_status &= phy_status_mask;
1514 
1515 	/*
1516 	 * The PHY may be busy with some initial calibration and whatnot,
1517 	 * so the power state can take a while to actually change.
1518 	 */
1519 	if (intel_wait_for_register(&dev_priv->uncore,
1520 				    DISPLAY_PHY_STATUS,
1521 				    phy_status_mask,
1522 				    phy_status,
1523 				    10))
1524 		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1525 			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1526 			   phy_status, dev_priv->chv_phy_control);
1527 }
1528 
1529 #undef BITS_SET
1530 
1531 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1532 					   struct i915_power_well *power_well)
1533 {
1534 	enum dpio_phy phy;
1535 	enum pipe pipe;
1536 	u32 tmp;
1537 
1538 	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1539 		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1540 
1541 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1542 		pipe = PIPE_A;
1543 		phy = DPIO_PHY0;
1544 	} else {
1545 		pipe = PIPE_C;
1546 		phy = DPIO_PHY1;
1547 	}
1548 
1549 	/* since ref/cri clock was enabled */
1550 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1551 	vlv_set_power_well(dev_priv, power_well, true);
1552 
1553 	/* Poll for phypwrgood signal */
1554 	if (intel_wait_for_register(&dev_priv->uncore,
1555 				    DISPLAY_PHY_STATUS,
1556 				    PHY_POWERGOOD(phy),
1557 				    PHY_POWERGOOD(phy),
1558 				    1))
1559 		DRM_ERROR("Display PHY %d is not power up\n", phy);
1560 
1561 	mutex_lock(&dev_priv->sb_lock);
1562 
1563 	/* Enable dynamic power down */
1564 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1565 	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1566 		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1567 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1568 
1569 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1570 		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1571 		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1572 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1573 	} else {
1574 		/*
1575 		 * Force the non-existing CL2 off. BXT does this
1576 		 * too, so maybe it saves some power even though
1577 		 * CL2 doesn't exist?
1578 		 */
1579 		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1580 		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1581 		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1582 	}
1583 
1584 	mutex_unlock(&dev_priv->sb_lock);
1585 
1586 	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1587 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1588 
1589 	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1590 		      phy, dev_priv->chv_phy_control);
1591 
1592 	assert_chv_phy_status(dev_priv);
1593 }
1594 
1595 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1596 					    struct i915_power_well *power_well)
1597 {
1598 	enum dpio_phy phy;
1599 
1600 	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1601 		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1602 
1603 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1604 		phy = DPIO_PHY0;
1605 		assert_pll_disabled(dev_priv, PIPE_A);
1606 		assert_pll_disabled(dev_priv, PIPE_B);
1607 	} else {
1608 		phy = DPIO_PHY1;
1609 		assert_pll_disabled(dev_priv, PIPE_C);
1610 	}
1611 
1612 	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1613 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1614 
1615 	vlv_set_power_well(dev_priv, power_well, false);
1616 
1617 	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1618 		      phy, dev_priv->chv_phy_control);
1619 
1620 	/* PHY is fully reset now, so we can enable the PHY state asserts */
1621 	dev_priv->chv_phy_assert[phy] = true;
1622 
1623 	assert_chv_phy_status(dev_priv);
1624 }
1625 
1626 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1627 				     enum dpio_channel ch, bool override, unsigned int mask)
1628 {
1629 	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1630 	u32 reg, val, expected, actual;
1631 
1632 	/*
1633 	 * The BIOS can leave the PHY is some weird state
1634 	 * where it doesn't fully power down some parts.
1635 	 * Disable the asserts until the PHY has been fully
1636 	 * reset (ie. the power well has been disabled at
1637 	 * least once).
1638 	 */
1639 	if (!dev_priv->chv_phy_assert[phy])
1640 		return;
1641 
1642 	if (ch == DPIO_CH0)
1643 		reg = _CHV_CMN_DW0_CH0;
1644 	else
1645 		reg = _CHV_CMN_DW6_CH1;
1646 
1647 	mutex_lock(&dev_priv->sb_lock);
1648 	val = vlv_dpio_read(dev_priv, pipe, reg);
1649 	mutex_unlock(&dev_priv->sb_lock);
1650 
1651 	/*
1652 	 * This assumes !override is only used when the port is disabled.
1653 	 * All lanes should power down even without the override when
1654 	 * the port is disabled.
1655 	 */
1656 	if (!override || mask == 0xf) {
1657 		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1658 		/*
1659 		 * If CH1 common lane is not active anymore
1660 		 * (eg. for pipe B DPLL) the entire channel will
1661 		 * shut down, which causes the common lane registers
1662 		 * to read as 0. That means we can't actually check
1663 		 * the lane power down status bits, but as the entire
1664 		 * register reads as 0 it's a good indication that the
1665 		 * channel is indeed entirely powered down.
1666 		 */
1667 		if (ch == DPIO_CH1 && val == 0)
1668 			expected = 0;
1669 	} else if (mask != 0x0) {
1670 		expected = DPIO_ANYDL_POWERDOWN;
1671 	} else {
1672 		expected = 0;
1673 	}
1674 
1675 	if (ch == DPIO_CH0)
1676 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1677 	else
1678 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1679 	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1680 
1681 	WARN(actual != expected,
1682 	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1683 	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1684 	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1685 	     reg, val);
1686 }
1687 
1688 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1689 			  enum dpio_channel ch, bool override)
1690 {
1691 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1692 	bool was_override;
1693 
1694 	mutex_lock(&power_domains->lock);
1695 
1696 	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1697 
1698 	if (override == was_override)
1699 		goto out;
1700 
1701 	if (override)
1702 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1703 	else
1704 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1705 
1706 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1707 
1708 	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1709 		      phy, ch, dev_priv->chv_phy_control);
1710 
1711 	assert_chv_phy_status(dev_priv);
1712 
1713 out:
1714 	mutex_unlock(&power_domains->lock);
1715 
1716 	return was_override;
1717 }
1718 
1719 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1720 			     bool override, unsigned int mask)
1721 {
1722 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1723 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1724 	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1725 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1726 
1727 	mutex_lock(&power_domains->lock);
1728 
1729 	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1730 	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1731 
1732 	if (override)
1733 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1734 	else
1735 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1736 
1737 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1738 
1739 	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1740 		      phy, ch, mask, dev_priv->chv_phy_control);
1741 
1742 	assert_chv_phy_status(dev_priv);
1743 
1744 	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1745 
1746 	mutex_unlock(&power_domains->lock);
1747 }
1748 
1749 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1750 					struct i915_power_well *power_well)
1751 {
1752 	enum pipe pipe = PIPE_A;
1753 	bool enabled;
1754 	u32 state, ctrl;
1755 
1756 	mutex_lock(&dev_priv->pcu_lock);
1757 
1758 	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1759 	/*
1760 	 * We only ever set the power-on and power-gate states, anything
1761 	 * else is unexpected.
1762 	 */
1763 	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1764 	enabled = state == DP_SSS_PWR_ON(pipe);
1765 
1766 	/*
1767 	 * A transient state at this point would mean some unexpected party
1768 	 * is poking at the power controls too.
1769 	 */
1770 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1771 	WARN_ON(ctrl << 16 != state);
1772 
1773 	mutex_unlock(&dev_priv->pcu_lock);
1774 
1775 	return enabled;
1776 }
1777 
1778 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1779 				    struct i915_power_well *power_well,
1780 				    bool enable)
1781 {
1782 	enum pipe pipe = PIPE_A;
1783 	u32 state;
1784 	u32 ctrl;
1785 
1786 	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1787 
1788 	mutex_lock(&dev_priv->pcu_lock);
1789 
1790 #define COND \
1791 	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1792 
1793 	if (COND)
1794 		goto out;
1795 
1796 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1797 	ctrl &= ~DP_SSC_MASK(pipe);
1798 	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1799 	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1800 
1801 	if (wait_for(COND, 100))
1802 		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1803 			  state,
1804 			  vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1805 
1806 #undef COND
1807 
1808 out:
1809 	mutex_unlock(&dev_priv->pcu_lock);
1810 }
1811 
1812 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1813 				       struct i915_power_well *power_well)
1814 {
1815 	chv_set_pipe_power_well(dev_priv, power_well, true);
1816 
1817 	vlv_display_power_well_init(dev_priv);
1818 }
1819 
1820 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1821 					struct i915_power_well *power_well)
1822 {
1823 	vlv_display_power_well_deinit(dev_priv);
1824 
1825 	chv_set_pipe_power_well(dev_priv, power_well, false);
1826 }
1827 
1828 static void
1829 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1830 				 enum intel_display_power_domain domain)
1831 {
1832 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1833 	struct i915_power_well *power_well;
1834 
1835 	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1836 		intel_power_well_get(dev_priv, power_well);
1837 
1838 	power_domains->domain_use_count[domain]++;
1839 }
1840 
1841 /**
1842  * intel_display_power_get - grab a power domain reference
1843  * @dev_priv: i915 device instance
1844  * @domain: power domain to reference
1845  *
1846  * This function grabs a power domain reference for @domain and ensures that the
1847  * power domain and all its parents are powered up. Therefore users should only
1848  * grab a reference to the innermost power domain they need.
1849  *
1850  * Any power domain reference obtained by this function must have a symmetric
1851  * call to intel_display_power_put() to release the reference again.
1852  */
1853 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1854 					enum intel_display_power_domain domain)
1855 {
1856 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1857 	intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
1858 
1859 	mutex_lock(&power_domains->lock);
1860 
1861 	__intel_display_power_get_domain(dev_priv, domain);
1862 
1863 	mutex_unlock(&power_domains->lock);
1864 
1865 	return wakeref;
1866 }
1867 
1868 /**
1869  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1870  * @dev_priv: i915 device instance
1871  * @domain: power domain to reference
1872  *
1873  * This function grabs a power domain reference for @domain and ensures that the
1874  * power domain and all its parents are powered up. Therefore users should only
1875  * grab a reference to the innermost power domain they need.
1876  *
1877  * Any power domain reference obtained by this function must have a symmetric
1878  * call to intel_display_power_put() to release the reference again.
1879  */
1880 intel_wakeref_t
1881 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1882 				   enum intel_display_power_domain domain)
1883 {
1884 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1885 	intel_wakeref_t wakeref;
1886 	bool is_enabled;
1887 
1888 	wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
1889 	if (!wakeref)
1890 		return false;
1891 
1892 	mutex_lock(&power_domains->lock);
1893 
1894 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1895 		__intel_display_power_get_domain(dev_priv, domain);
1896 		is_enabled = true;
1897 	} else {
1898 		is_enabled = false;
1899 	}
1900 
1901 	mutex_unlock(&power_domains->lock);
1902 
1903 	if (!is_enabled) {
1904 		intel_runtime_pm_put(dev_priv, wakeref);
1905 		wakeref = 0;
1906 	}
1907 
1908 	return wakeref;
1909 }
1910 
1911 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
1912 				      enum intel_display_power_domain domain)
1913 {
1914 	struct i915_power_domains *power_domains;
1915 	struct i915_power_well *power_well;
1916 
1917 	power_domains = &dev_priv->power_domains;
1918 
1919 	mutex_lock(&power_domains->lock);
1920 
1921 	WARN(!power_domains->domain_use_count[domain],
1922 	     "Use count on domain %s is already zero\n",
1923 	     intel_display_power_domain_str(domain));
1924 	power_domains->domain_use_count[domain]--;
1925 
1926 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
1927 		intel_power_well_put(dev_priv, power_well);
1928 
1929 	mutex_unlock(&power_domains->lock);
1930 }
1931 
1932 /**
1933  * intel_display_power_put - release a power domain reference
1934  * @dev_priv: i915 device instance
1935  * @domain: power domain to reference
1936  *
1937  * This function drops the power domain reference obtained by
1938  * intel_display_power_get() and might power down the corresponding hardware
1939  * block right away if this is the last reference.
1940  */
1941 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
1942 				       enum intel_display_power_domain domain)
1943 {
1944 	__intel_display_power_put(dev_priv, domain);
1945 	intel_runtime_pm_put_unchecked(dev_priv);
1946 }
1947 
1948 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1949 void intel_display_power_put(struct drm_i915_private *dev_priv,
1950 			     enum intel_display_power_domain domain,
1951 			     intel_wakeref_t wakeref)
1952 {
1953 	__intel_display_power_put(dev_priv, domain);
1954 	intel_runtime_pm_put(dev_priv, wakeref);
1955 }
1956 #endif
1957 
1958 #define I830_PIPES_POWER_DOMAINS (		\
1959 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
1960 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
1961 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1962 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1963 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
1964 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
1965 	BIT_ULL(POWER_DOMAIN_INIT))
1966 
1967 #define VLV_DISPLAY_POWER_DOMAINS (		\
1968 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
1969 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
1970 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1971 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1972 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
1973 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
1974 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1975 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1976 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
1977 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
1978 	BIT_ULL(POWER_DOMAIN_VGA) |			\
1979 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
1980 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1981 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1982 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
1983 	BIT_ULL(POWER_DOMAIN_INIT))
1984 
1985 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
1986 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1987 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1988 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
1989 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1990 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1991 	BIT_ULL(POWER_DOMAIN_INIT))
1992 
1993 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
1994 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1995 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1996 	BIT_ULL(POWER_DOMAIN_INIT))
1997 
1998 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
1999 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2000 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2001 	BIT_ULL(POWER_DOMAIN_INIT))
2002 
2003 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
2004 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2005 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2006 	BIT_ULL(POWER_DOMAIN_INIT))
2007 
2008 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
2009 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2010 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2011 	BIT_ULL(POWER_DOMAIN_INIT))
2012 
2013 #define CHV_DISPLAY_POWER_DOMAINS (		\
2014 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2015 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2016 	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
2017 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2018 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2019 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2020 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2021 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2022 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
2023 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2024 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2025 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2026 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2027 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2028 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2029 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2030 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2031 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2032 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2033 	BIT_ULL(POWER_DOMAIN_INIT))
2034 
2035 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
2036 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2037 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2038 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2039 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2040 	BIT_ULL(POWER_DOMAIN_INIT))
2041 
2042 #define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
2043 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2044 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2045 	BIT_ULL(POWER_DOMAIN_INIT))
2046 
2047 #define HSW_DISPLAY_POWER_DOMAINS (			\
2048 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2049 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2050 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
2051 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2052 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2053 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2054 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2055 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2056 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2057 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2058 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2059 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2060 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2061 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2062 	BIT_ULL(POWER_DOMAIN_INIT))
2063 
2064 #define BDW_DISPLAY_POWER_DOMAINS (			\
2065 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2066 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2067 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2068 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2069 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2070 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2071 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2072 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2073 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2074 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2075 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2076 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2077 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2078 	BIT_ULL(POWER_DOMAIN_INIT))
2079 
2080 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2081 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2082 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2083 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2084 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2085 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2086 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2087 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2088 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2089 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2090 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2091 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
2092 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2093 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2094 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2095 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2096 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2097 	BIT_ULL(POWER_DOMAIN_INIT))
2098 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
2099 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2100 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2101 	BIT_ULL(POWER_DOMAIN_INIT))
2102 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2103 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2104 	BIT_ULL(POWER_DOMAIN_INIT))
2105 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2106 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2107 	BIT_ULL(POWER_DOMAIN_INIT))
2108 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
2109 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2110 	BIT_ULL(POWER_DOMAIN_INIT))
2111 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2112 	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2113 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2114 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2115 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2116 	BIT_ULL(POWER_DOMAIN_INIT))
2117 
2118 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2119 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2120 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2121 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2122 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2123 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2124 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2125 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2126 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2127 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2128 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2129 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2130 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2131 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2132 	BIT_ULL(POWER_DOMAIN_INIT))
2133 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2134 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2135 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2136 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2137 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2138 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2139 	BIT_ULL(POWER_DOMAIN_INIT))
2140 #define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
2141 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2142 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2143 	BIT_ULL(POWER_DOMAIN_INIT))
2144 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
2145 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2146 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2147 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2148 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2149 	BIT_ULL(POWER_DOMAIN_INIT))
2150 
2151 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2152 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2153 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2154 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2155 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2156 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2157 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2158 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2159 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2160 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2161 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2162 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2163 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2164 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2165 	BIT_ULL(POWER_DOMAIN_INIT))
2166 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
2167 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2168 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2169 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2170 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2171 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2172 #define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
2173 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2174 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2175 	BIT_ULL(POWER_DOMAIN_INIT))
2176 #define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
2177 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2178 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2179 	BIT_ULL(POWER_DOMAIN_INIT))
2180 #define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
2181 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2182 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2183 	BIT_ULL(POWER_DOMAIN_INIT))
2184 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
2185 	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
2186 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2187 	BIT_ULL(POWER_DOMAIN_INIT))
2188 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
2189 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2190 	BIT_ULL(POWER_DOMAIN_INIT))
2191 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
2192 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2193 	BIT_ULL(POWER_DOMAIN_INIT))
2194 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2195 	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2196 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2197 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2198 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2199 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2200 	BIT_ULL(POWER_DOMAIN_INIT))
2201 
2202 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2203 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2204 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2205 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2206 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2207 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2208 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2209 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2210 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2211 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2212 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2213 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
2214 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2215 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2216 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2217 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2218 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2219 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2220 	BIT_ULL(POWER_DOMAIN_INIT))
2221 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
2222 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2223 	BIT_ULL(POWER_DOMAIN_INIT))
2224 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
2225 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2226 	BIT_ULL(POWER_DOMAIN_INIT))
2227 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
2228 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2229 	BIT_ULL(POWER_DOMAIN_INIT))
2230 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
2231 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2232 	BIT_ULL(POWER_DOMAIN_INIT))
2233 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
2234 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2235 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2236 	BIT_ULL(POWER_DOMAIN_INIT))
2237 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
2238 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2239 	BIT_ULL(POWER_DOMAIN_INIT))
2240 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
2241 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2242 	BIT_ULL(POWER_DOMAIN_INIT))
2243 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
2244 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2245 	BIT_ULL(POWER_DOMAIN_INIT))
2246 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
2247 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2248 	BIT_ULL(POWER_DOMAIN_INIT))
2249 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
2250 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2251 	BIT_ULL(POWER_DOMAIN_INIT))
2252 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2253 	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2254 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2255 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2256 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2257 	BIT_ULL(POWER_DOMAIN_INIT))
2258 
2259 /*
2260  * ICL PW_0/PG_0 domains (HW/DMC control):
2261  * - PCI
2262  * - clocks except port PLL
2263  * - central power except FBC
2264  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2265  * ICL PW_1/PG_1 domains (HW/DMC control):
2266  * - DBUF function
2267  * - PIPE_A and its planes, except VGA
2268  * - transcoder EDP + PSR
2269  * - transcoder DSI
2270  * - DDI_A
2271  * - FBC
2272  */
2273 #define ICL_PW_4_POWER_DOMAINS (			\
2274 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2275 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2276 	BIT_ULL(POWER_DOMAIN_INIT))
2277 	/* VDSC/joining */
2278 #define ICL_PW_3_POWER_DOMAINS (			\
2279 	ICL_PW_4_POWER_DOMAINS |			\
2280 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2281 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2282 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2283 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2284 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2285 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2286 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2287 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2288 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2289 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2290 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2291 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2292 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2293 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2294 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2295 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2296 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2297 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2298 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2299 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2300 	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
2301 	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
2302 	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
2303 	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
2304 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2305 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2306 	BIT_ULL(POWER_DOMAIN_INIT))
2307 	/*
2308 	 * - transcoder WD
2309 	 * - KVMR (HW control)
2310 	 */
2311 #define ICL_PW_2_POWER_DOMAINS (			\
2312 	ICL_PW_3_POWER_DOMAINS |			\
2313 	BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |		\
2314 	BIT_ULL(POWER_DOMAIN_INIT))
2315 	/*
2316 	 * - KVMR (HW control)
2317 	 */
2318 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2319 	ICL_PW_2_POWER_DOMAINS |			\
2320 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2321 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2322 	BIT_ULL(POWER_DOMAIN_INIT))
2323 
2324 #define ICL_DDI_IO_A_POWER_DOMAINS (			\
2325 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2326 #define ICL_DDI_IO_B_POWER_DOMAINS (			\
2327 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2328 #define ICL_DDI_IO_C_POWER_DOMAINS (			\
2329 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2330 #define ICL_DDI_IO_D_POWER_DOMAINS (			\
2331 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2332 #define ICL_DDI_IO_E_POWER_DOMAINS (			\
2333 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2334 #define ICL_DDI_IO_F_POWER_DOMAINS (			\
2335 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2336 
2337 #define ICL_AUX_A_IO_POWER_DOMAINS (			\
2338 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2339 	BIT_ULL(POWER_DOMAIN_AUX_A))
2340 #define ICL_AUX_B_IO_POWER_DOMAINS (			\
2341 	BIT_ULL(POWER_DOMAIN_AUX_B))
2342 #define ICL_AUX_C_IO_POWER_DOMAINS (			\
2343 	BIT_ULL(POWER_DOMAIN_AUX_C))
2344 #define ICL_AUX_D_IO_POWER_DOMAINS (			\
2345 	BIT_ULL(POWER_DOMAIN_AUX_D))
2346 #define ICL_AUX_E_IO_POWER_DOMAINS (			\
2347 	BIT_ULL(POWER_DOMAIN_AUX_E))
2348 #define ICL_AUX_F_IO_POWER_DOMAINS (			\
2349 	BIT_ULL(POWER_DOMAIN_AUX_F))
2350 #define ICL_AUX_TBT1_IO_POWER_DOMAINS (			\
2351 	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
2352 #define ICL_AUX_TBT2_IO_POWER_DOMAINS (			\
2353 	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
2354 #define ICL_AUX_TBT3_IO_POWER_DOMAINS (			\
2355 	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
2356 #define ICL_AUX_TBT4_IO_POWER_DOMAINS (			\
2357 	BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2358 
2359 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2360 	.sync_hw = i9xx_power_well_sync_hw_noop,
2361 	.enable = i9xx_always_on_power_well_noop,
2362 	.disable = i9xx_always_on_power_well_noop,
2363 	.is_enabled = i9xx_always_on_power_well_enabled,
2364 };
2365 
2366 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2367 	.sync_hw = i9xx_power_well_sync_hw_noop,
2368 	.enable = chv_pipe_power_well_enable,
2369 	.disable = chv_pipe_power_well_disable,
2370 	.is_enabled = chv_pipe_power_well_enabled,
2371 };
2372 
2373 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2374 	.sync_hw = i9xx_power_well_sync_hw_noop,
2375 	.enable = chv_dpio_cmn_power_well_enable,
2376 	.disable = chv_dpio_cmn_power_well_disable,
2377 	.is_enabled = vlv_power_well_enabled,
2378 };
2379 
2380 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2381 	{
2382 		.name = "always-on",
2383 		.always_on = true,
2384 		.domains = POWER_DOMAIN_MASK,
2385 		.ops = &i9xx_always_on_power_well_ops,
2386 		.id = DISP_PW_ID_NONE,
2387 	},
2388 };
2389 
2390 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2391 	.sync_hw = i830_pipes_power_well_sync_hw,
2392 	.enable = i830_pipes_power_well_enable,
2393 	.disable = i830_pipes_power_well_disable,
2394 	.is_enabled = i830_pipes_power_well_enabled,
2395 };
2396 
2397 static const struct i915_power_well_desc i830_power_wells[] = {
2398 	{
2399 		.name = "always-on",
2400 		.always_on = true,
2401 		.domains = POWER_DOMAIN_MASK,
2402 		.ops = &i9xx_always_on_power_well_ops,
2403 		.id = DISP_PW_ID_NONE,
2404 	},
2405 	{
2406 		.name = "pipes",
2407 		.domains = I830_PIPES_POWER_DOMAINS,
2408 		.ops = &i830_pipes_power_well_ops,
2409 		.id = DISP_PW_ID_NONE,
2410 	},
2411 };
2412 
2413 static const struct i915_power_well_ops hsw_power_well_ops = {
2414 	.sync_hw = hsw_power_well_sync_hw,
2415 	.enable = hsw_power_well_enable,
2416 	.disable = hsw_power_well_disable,
2417 	.is_enabled = hsw_power_well_enabled,
2418 };
2419 
2420 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2421 	.sync_hw = i9xx_power_well_sync_hw_noop,
2422 	.enable = gen9_dc_off_power_well_enable,
2423 	.disable = gen9_dc_off_power_well_disable,
2424 	.is_enabled = gen9_dc_off_power_well_enabled,
2425 };
2426 
2427 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2428 	.sync_hw = i9xx_power_well_sync_hw_noop,
2429 	.enable = bxt_dpio_cmn_power_well_enable,
2430 	.disable = bxt_dpio_cmn_power_well_disable,
2431 	.is_enabled = bxt_dpio_cmn_power_well_enabled,
2432 };
2433 
2434 static const struct i915_power_well_regs hsw_power_well_regs = {
2435 	.bios	= HSW_PWR_WELL_CTL1,
2436 	.driver	= HSW_PWR_WELL_CTL2,
2437 	.kvmr	= HSW_PWR_WELL_CTL3,
2438 	.debug	= HSW_PWR_WELL_CTL4,
2439 };
2440 
2441 static const struct i915_power_well_desc hsw_power_wells[] = {
2442 	{
2443 		.name = "always-on",
2444 		.always_on = true,
2445 		.domains = POWER_DOMAIN_MASK,
2446 		.ops = &i9xx_always_on_power_well_ops,
2447 		.id = DISP_PW_ID_NONE,
2448 	},
2449 	{
2450 		.name = "display",
2451 		.domains = HSW_DISPLAY_POWER_DOMAINS,
2452 		.ops = &hsw_power_well_ops,
2453 		.id = HSW_DISP_PW_GLOBAL,
2454 		{
2455 			.hsw.regs = &hsw_power_well_regs,
2456 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2457 			.hsw.has_vga = true,
2458 		},
2459 	},
2460 };
2461 
2462 static const struct i915_power_well_desc bdw_power_wells[] = {
2463 	{
2464 		.name = "always-on",
2465 		.always_on = true,
2466 		.domains = POWER_DOMAIN_MASK,
2467 		.ops = &i9xx_always_on_power_well_ops,
2468 		.id = DISP_PW_ID_NONE,
2469 	},
2470 	{
2471 		.name = "display",
2472 		.domains = BDW_DISPLAY_POWER_DOMAINS,
2473 		.ops = &hsw_power_well_ops,
2474 		.id = HSW_DISP_PW_GLOBAL,
2475 		{
2476 			.hsw.regs = &hsw_power_well_regs,
2477 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2478 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2479 			.hsw.has_vga = true,
2480 		},
2481 	},
2482 };
2483 
2484 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2485 	.sync_hw = i9xx_power_well_sync_hw_noop,
2486 	.enable = vlv_display_power_well_enable,
2487 	.disable = vlv_display_power_well_disable,
2488 	.is_enabled = vlv_power_well_enabled,
2489 };
2490 
2491 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2492 	.sync_hw = i9xx_power_well_sync_hw_noop,
2493 	.enable = vlv_dpio_cmn_power_well_enable,
2494 	.disable = vlv_dpio_cmn_power_well_disable,
2495 	.is_enabled = vlv_power_well_enabled,
2496 };
2497 
2498 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2499 	.sync_hw = i9xx_power_well_sync_hw_noop,
2500 	.enable = vlv_power_well_enable,
2501 	.disable = vlv_power_well_disable,
2502 	.is_enabled = vlv_power_well_enabled,
2503 };
2504 
2505 static const struct i915_power_well_desc vlv_power_wells[] = {
2506 	{
2507 		.name = "always-on",
2508 		.always_on = true,
2509 		.domains = POWER_DOMAIN_MASK,
2510 		.ops = &i9xx_always_on_power_well_ops,
2511 		.id = DISP_PW_ID_NONE,
2512 	},
2513 	{
2514 		.name = "display",
2515 		.domains = VLV_DISPLAY_POWER_DOMAINS,
2516 		.ops = &vlv_display_power_well_ops,
2517 		.id = VLV_DISP_PW_DISP2D,
2518 		{
2519 			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2520 		},
2521 	},
2522 	{
2523 		.name = "dpio-tx-b-01",
2524 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2525 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2526 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2527 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2528 		.ops = &vlv_dpio_power_well_ops,
2529 		.id = DISP_PW_ID_NONE,
2530 		{
2531 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2532 		},
2533 	},
2534 	{
2535 		.name = "dpio-tx-b-23",
2536 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2537 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2538 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2539 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2540 		.ops = &vlv_dpio_power_well_ops,
2541 		.id = DISP_PW_ID_NONE,
2542 		{
2543 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2544 		},
2545 	},
2546 	{
2547 		.name = "dpio-tx-c-01",
2548 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2549 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2550 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2551 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2552 		.ops = &vlv_dpio_power_well_ops,
2553 		.id = DISP_PW_ID_NONE,
2554 		{
2555 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
2556 		},
2557 	},
2558 	{
2559 		.name = "dpio-tx-c-23",
2560 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2561 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2562 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2563 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2564 		.ops = &vlv_dpio_power_well_ops,
2565 		.id = DISP_PW_ID_NONE,
2566 		{
2567 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
2568 		},
2569 	},
2570 	{
2571 		.name = "dpio-common",
2572 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2573 		.ops = &vlv_dpio_cmn_power_well_ops,
2574 		.id = VLV_DISP_PW_DPIO_CMN_BC,
2575 		{
2576 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2577 		},
2578 	},
2579 };
2580 
2581 static const struct i915_power_well_desc chv_power_wells[] = {
2582 	{
2583 		.name = "always-on",
2584 		.always_on = true,
2585 		.domains = POWER_DOMAIN_MASK,
2586 		.ops = &i9xx_always_on_power_well_ops,
2587 		.id = DISP_PW_ID_NONE,
2588 	},
2589 	{
2590 		.name = "display",
2591 		/*
2592 		 * Pipe A power well is the new disp2d well. Pipe B and C
2593 		 * power wells don't actually exist. Pipe A power well is
2594 		 * required for any pipe to work.
2595 		 */
2596 		.domains = CHV_DISPLAY_POWER_DOMAINS,
2597 		.ops = &chv_pipe_power_well_ops,
2598 		.id = DISP_PW_ID_NONE,
2599 	},
2600 	{
2601 		.name = "dpio-common-bc",
2602 		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2603 		.ops = &chv_dpio_cmn_power_well_ops,
2604 		.id = VLV_DISP_PW_DPIO_CMN_BC,
2605 		{
2606 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2607 		},
2608 	},
2609 	{
2610 		.name = "dpio-common-d",
2611 		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2612 		.ops = &chv_dpio_cmn_power_well_ops,
2613 		.id = CHV_DISP_PW_DPIO_CMN_D,
2614 		{
2615 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
2616 		},
2617 	},
2618 };
2619 
2620 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2621 					 enum i915_power_well_id power_well_id)
2622 {
2623 	struct i915_power_well *power_well;
2624 	bool ret;
2625 
2626 	power_well = lookup_power_well(dev_priv, power_well_id);
2627 	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
2628 
2629 	return ret;
2630 }
2631 
2632 static const struct i915_power_well_desc skl_power_wells[] = {
2633 	{
2634 		.name = "always-on",
2635 		.always_on = true,
2636 		.domains = POWER_DOMAIN_MASK,
2637 		.ops = &i9xx_always_on_power_well_ops,
2638 		.id = DISP_PW_ID_NONE,
2639 	},
2640 	{
2641 		.name = "power well 1",
2642 		/* Handled by the DMC firmware */
2643 		.always_on = true,
2644 		.domains = 0,
2645 		.ops = &hsw_power_well_ops,
2646 		.id = SKL_DISP_PW_1,
2647 		{
2648 			.hsw.regs = &hsw_power_well_regs,
2649 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
2650 			.hsw.has_fuses = true,
2651 		},
2652 	},
2653 	{
2654 		.name = "MISC IO power well",
2655 		/* Handled by the DMC firmware */
2656 		.always_on = true,
2657 		.domains = 0,
2658 		.ops = &hsw_power_well_ops,
2659 		.id = SKL_DISP_PW_MISC_IO,
2660 		{
2661 			.hsw.regs = &hsw_power_well_regs,
2662 			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
2663 		},
2664 	},
2665 	{
2666 		.name = "DC off",
2667 		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2668 		.ops = &gen9_dc_off_power_well_ops,
2669 		.id = DISP_PW_ID_NONE,
2670 	},
2671 	{
2672 		.name = "power well 2",
2673 		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2674 		.ops = &hsw_power_well_ops,
2675 		.id = SKL_DISP_PW_2,
2676 		{
2677 			.hsw.regs = &hsw_power_well_regs,
2678 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
2679 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2680 			.hsw.has_vga = true,
2681 			.hsw.has_fuses = true,
2682 		},
2683 	},
2684 	{
2685 		.name = "DDI A/E IO power well",
2686 		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
2687 		.ops = &hsw_power_well_ops,
2688 		.id = DISP_PW_ID_NONE,
2689 		{
2690 			.hsw.regs = &hsw_power_well_regs,
2691 			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
2692 		},
2693 	},
2694 	{
2695 		.name = "DDI B IO power well",
2696 		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2697 		.ops = &hsw_power_well_ops,
2698 		.id = DISP_PW_ID_NONE,
2699 		{
2700 			.hsw.regs = &hsw_power_well_regs,
2701 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2702 		},
2703 	},
2704 	{
2705 		.name = "DDI C IO power well",
2706 		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2707 		.ops = &hsw_power_well_ops,
2708 		.id = DISP_PW_ID_NONE,
2709 		{
2710 			.hsw.regs = &hsw_power_well_regs,
2711 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2712 		},
2713 	},
2714 	{
2715 		.name = "DDI D IO power well",
2716 		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
2717 		.ops = &hsw_power_well_ops,
2718 		.id = DISP_PW_ID_NONE,
2719 		{
2720 			.hsw.regs = &hsw_power_well_regs,
2721 			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
2722 		},
2723 	},
2724 };
2725 
2726 static const struct i915_power_well_desc bxt_power_wells[] = {
2727 	{
2728 		.name = "always-on",
2729 		.always_on = true,
2730 		.domains = POWER_DOMAIN_MASK,
2731 		.ops = &i9xx_always_on_power_well_ops,
2732 		.id = DISP_PW_ID_NONE,
2733 	},
2734 	{
2735 		.name = "power well 1",
2736 		/* Handled by the DMC firmware */
2737 		.always_on = true,
2738 		.domains = 0,
2739 		.ops = &hsw_power_well_ops,
2740 		.id = SKL_DISP_PW_1,
2741 		{
2742 			.hsw.regs = &hsw_power_well_regs,
2743 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
2744 			.hsw.has_fuses = true,
2745 		},
2746 	},
2747 	{
2748 		.name = "DC off",
2749 		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2750 		.ops = &gen9_dc_off_power_well_ops,
2751 		.id = DISP_PW_ID_NONE,
2752 	},
2753 	{
2754 		.name = "power well 2",
2755 		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2756 		.ops = &hsw_power_well_ops,
2757 		.id = SKL_DISP_PW_2,
2758 		{
2759 			.hsw.regs = &hsw_power_well_regs,
2760 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
2761 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2762 			.hsw.has_vga = true,
2763 			.hsw.has_fuses = true,
2764 		},
2765 	},
2766 	{
2767 		.name = "dpio-common-a",
2768 		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2769 		.ops = &bxt_dpio_cmn_power_well_ops,
2770 		.id = BXT_DISP_PW_DPIO_CMN_A,
2771 		{
2772 			.bxt.phy = DPIO_PHY1,
2773 		},
2774 	},
2775 	{
2776 		.name = "dpio-common-bc",
2777 		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2778 		.ops = &bxt_dpio_cmn_power_well_ops,
2779 		.id = VLV_DISP_PW_DPIO_CMN_BC,
2780 		{
2781 			.bxt.phy = DPIO_PHY0,
2782 		},
2783 	},
2784 };
2785 
2786 static const struct i915_power_well_desc glk_power_wells[] = {
2787 	{
2788 		.name = "always-on",
2789 		.always_on = true,
2790 		.domains = POWER_DOMAIN_MASK,
2791 		.ops = &i9xx_always_on_power_well_ops,
2792 		.id = DISP_PW_ID_NONE,
2793 	},
2794 	{
2795 		.name = "power well 1",
2796 		/* Handled by the DMC firmware */
2797 		.always_on = true,
2798 		.domains = 0,
2799 		.ops = &hsw_power_well_ops,
2800 		.id = SKL_DISP_PW_1,
2801 		{
2802 			.hsw.regs = &hsw_power_well_regs,
2803 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
2804 			.hsw.has_fuses = true,
2805 		},
2806 	},
2807 	{
2808 		.name = "DC off",
2809 		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2810 		.ops = &gen9_dc_off_power_well_ops,
2811 		.id = DISP_PW_ID_NONE,
2812 	},
2813 	{
2814 		.name = "power well 2",
2815 		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2816 		.ops = &hsw_power_well_ops,
2817 		.id = SKL_DISP_PW_2,
2818 		{
2819 			.hsw.regs = &hsw_power_well_regs,
2820 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
2821 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2822 			.hsw.has_vga = true,
2823 			.hsw.has_fuses = true,
2824 		},
2825 	},
2826 	{
2827 		.name = "dpio-common-a",
2828 		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2829 		.ops = &bxt_dpio_cmn_power_well_ops,
2830 		.id = BXT_DISP_PW_DPIO_CMN_A,
2831 		{
2832 			.bxt.phy = DPIO_PHY1,
2833 		},
2834 	},
2835 	{
2836 		.name = "dpio-common-b",
2837 		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2838 		.ops = &bxt_dpio_cmn_power_well_ops,
2839 		.id = VLV_DISP_PW_DPIO_CMN_BC,
2840 		{
2841 			.bxt.phy = DPIO_PHY0,
2842 		},
2843 	},
2844 	{
2845 		.name = "dpio-common-c",
2846 		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2847 		.ops = &bxt_dpio_cmn_power_well_ops,
2848 		.id = GLK_DISP_PW_DPIO_CMN_C,
2849 		{
2850 			.bxt.phy = DPIO_PHY2,
2851 		},
2852 	},
2853 	{
2854 		.name = "AUX A",
2855 		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
2856 		.ops = &hsw_power_well_ops,
2857 		.id = DISP_PW_ID_NONE,
2858 		{
2859 			.hsw.regs = &hsw_power_well_regs,
2860 			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2861 		},
2862 	},
2863 	{
2864 		.name = "AUX B",
2865 		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
2866 		.ops = &hsw_power_well_ops,
2867 		.id = DISP_PW_ID_NONE,
2868 		{
2869 			.hsw.regs = &hsw_power_well_regs,
2870 			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2871 		},
2872 	},
2873 	{
2874 		.name = "AUX C",
2875 		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
2876 		.ops = &hsw_power_well_ops,
2877 		.id = DISP_PW_ID_NONE,
2878 		{
2879 			.hsw.regs = &hsw_power_well_regs,
2880 			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
2881 		},
2882 	},
2883 	{
2884 		.name = "DDI A IO power well",
2885 		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
2886 		.ops = &hsw_power_well_ops,
2887 		.id = DISP_PW_ID_NONE,
2888 		{
2889 			.hsw.regs = &hsw_power_well_regs,
2890 			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
2891 		},
2892 	},
2893 	{
2894 		.name = "DDI B IO power well",
2895 		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2896 		.ops = &hsw_power_well_ops,
2897 		.id = DISP_PW_ID_NONE,
2898 		{
2899 			.hsw.regs = &hsw_power_well_regs,
2900 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2901 		},
2902 	},
2903 	{
2904 		.name = "DDI C IO power well",
2905 		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2906 		.ops = &hsw_power_well_ops,
2907 		.id = DISP_PW_ID_NONE,
2908 		{
2909 			.hsw.regs = &hsw_power_well_regs,
2910 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2911 		},
2912 	},
2913 };
2914 
2915 static const struct i915_power_well_desc cnl_power_wells[] = {
2916 	{
2917 		.name = "always-on",
2918 		.always_on = true,
2919 		.domains = POWER_DOMAIN_MASK,
2920 		.ops = &i9xx_always_on_power_well_ops,
2921 		.id = DISP_PW_ID_NONE,
2922 	},
2923 	{
2924 		.name = "power well 1",
2925 		/* Handled by the DMC firmware */
2926 		.always_on = true,
2927 		.domains = 0,
2928 		.ops = &hsw_power_well_ops,
2929 		.id = SKL_DISP_PW_1,
2930 		{
2931 			.hsw.regs = &hsw_power_well_regs,
2932 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
2933 			.hsw.has_fuses = true,
2934 		},
2935 	},
2936 	{
2937 		.name = "AUX A",
2938 		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
2939 		.ops = &hsw_power_well_ops,
2940 		.id = DISP_PW_ID_NONE,
2941 		{
2942 			.hsw.regs = &hsw_power_well_regs,
2943 			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2944 		},
2945 	},
2946 	{
2947 		.name = "AUX B",
2948 		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
2949 		.ops = &hsw_power_well_ops,
2950 		.id = DISP_PW_ID_NONE,
2951 		{
2952 			.hsw.regs = &hsw_power_well_regs,
2953 			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2954 		},
2955 	},
2956 	{
2957 		.name = "AUX C",
2958 		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
2959 		.ops = &hsw_power_well_ops,
2960 		.id = DISP_PW_ID_NONE,
2961 		{
2962 			.hsw.regs = &hsw_power_well_regs,
2963 			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
2964 		},
2965 	},
2966 	{
2967 		.name = "AUX D",
2968 		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
2969 		.ops = &hsw_power_well_ops,
2970 		.id = DISP_PW_ID_NONE,
2971 		{
2972 			.hsw.regs = &hsw_power_well_regs,
2973 			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
2974 		},
2975 	},
2976 	{
2977 		.name = "DC off",
2978 		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
2979 		.ops = &gen9_dc_off_power_well_ops,
2980 		.id = DISP_PW_ID_NONE,
2981 	},
2982 	{
2983 		.name = "power well 2",
2984 		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2985 		.ops = &hsw_power_well_ops,
2986 		.id = SKL_DISP_PW_2,
2987 		{
2988 			.hsw.regs = &hsw_power_well_regs,
2989 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
2990 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2991 			.hsw.has_vga = true,
2992 			.hsw.has_fuses = true,
2993 		},
2994 	},
2995 	{
2996 		.name = "DDI A IO power well",
2997 		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
2998 		.ops = &hsw_power_well_ops,
2999 		.id = DISP_PW_ID_NONE,
3000 		{
3001 			.hsw.regs = &hsw_power_well_regs,
3002 			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3003 		},
3004 	},
3005 	{
3006 		.name = "DDI B IO power well",
3007 		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3008 		.ops = &hsw_power_well_ops,
3009 		.id = DISP_PW_ID_NONE,
3010 		{
3011 			.hsw.regs = &hsw_power_well_regs,
3012 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3013 		},
3014 	},
3015 	{
3016 		.name = "DDI C IO power well",
3017 		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3018 		.ops = &hsw_power_well_ops,
3019 		.id = DISP_PW_ID_NONE,
3020 		{
3021 			.hsw.regs = &hsw_power_well_regs,
3022 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3023 		},
3024 	},
3025 	{
3026 		.name = "DDI D IO power well",
3027 		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3028 		.ops = &hsw_power_well_ops,
3029 		.id = DISP_PW_ID_NONE,
3030 		{
3031 			.hsw.regs = &hsw_power_well_regs,
3032 			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3033 		},
3034 	},
3035 	{
3036 		.name = "DDI F IO power well",
3037 		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3038 		.ops = &hsw_power_well_ops,
3039 		.id = DISP_PW_ID_NONE,
3040 		{
3041 			.hsw.regs = &hsw_power_well_regs,
3042 			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3043 		},
3044 	},
3045 	{
3046 		.name = "AUX F",
3047 		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3048 		.ops = &hsw_power_well_ops,
3049 		.id = DISP_PW_ID_NONE,
3050 		{
3051 			.hsw.regs = &hsw_power_well_regs,
3052 			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3053 		},
3054 	},
3055 };
3056 
3057 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3058 	.sync_hw = hsw_power_well_sync_hw,
3059 	.enable = icl_combo_phy_aux_power_well_enable,
3060 	.disable = icl_combo_phy_aux_power_well_disable,
3061 	.is_enabled = hsw_power_well_enabled,
3062 };
3063 
3064 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3065 	.sync_hw = hsw_power_well_sync_hw,
3066 	.enable = icl_tc_phy_aux_power_well_enable,
3067 	.disable = hsw_power_well_disable,
3068 	.is_enabled = hsw_power_well_enabled,
3069 };
3070 
3071 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3072 	.bios	= ICL_PWR_WELL_CTL_AUX1,
3073 	.driver	= ICL_PWR_WELL_CTL_AUX2,
3074 	.debug	= ICL_PWR_WELL_CTL_AUX4,
3075 };
3076 
3077 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3078 	.bios	= ICL_PWR_WELL_CTL_DDI1,
3079 	.driver	= ICL_PWR_WELL_CTL_DDI2,
3080 	.debug	= ICL_PWR_WELL_CTL_DDI4,
3081 };
3082 
3083 static const struct i915_power_well_desc icl_power_wells[] = {
3084 	{
3085 		.name = "always-on",
3086 		.always_on = true,
3087 		.domains = POWER_DOMAIN_MASK,
3088 		.ops = &i9xx_always_on_power_well_ops,
3089 		.id = DISP_PW_ID_NONE,
3090 	},
3091 	{
3092 		.name = "power well 1",
3093 		/* Handled by the DMC firmware */
3094 		.always_on = true,
3095 		.domains = 0,
3096 		.ops = &hsw_power_well_ops,
3097 		.id = SKL_DISP_PW_1,
3098 		{
3099 			.hsw.regs = &hsw_power_well_regs,
3100 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3101 			.hsw.has_fuses = true,
3102 		},
3103 	},
3104 	{
3105 		.name = "DC off",
3106 		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3107 		.ops = &gen9_dc_off_power_well_ops,
3108 		.id = DISP_PW_ID_NONE,
3109 	},
3110 	{
3111 		.name = "power well 2",
3112 		.domains = ICL_PW_2_POWER_DOMAINS,
3113 		.ops = &hsw_power_well_ops,
3114 		.id = SKL_DISP_PW_2,
3115 		{
3116 			.hsw.regs = &hsw_power_well_regs,
3117 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3118 			.hsw.has_fuses = true,
3119 		},
3120 	},
3121 	{
3122 		.name = "power well 3",
3123 		.domains = ICL_PW_3_POWER_DOMAINS,
3124 		.ops = &hsw_power_well_ops,
3125 		.id = DISP_PW_ID_NONE,
3126 		{
3127 			.hsw.regs = &hsw_power_well_regs,
3128 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3129 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3130 			.hsw.has_vga = true,
3131 			.hsw.has_fuses = true,
3132 		},
3133 	},
3134 	{
3135 		.name = "DDI A IO",
3136 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3137 		.ops = &hsw_power_well_ops,
3138 		.id = DISP_PW_ID_NONE,
3139 		{
3140 			.hsw.regs = &icl_ddi_power_well_regs,
3141 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3142 		},
3143 	},
3144 	{
3145 		.name = "DDI B IO",
3146 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3147 		.ops = &hsw_power_well_ops,
3148 		.id = DISP_PW_ID_NONE,
3149 		{
3150 			.hsw.regs = &icl_ddi_power_well_regs,
3151 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3152 		},
3153 	},
3154 	{
3155 		.name = "DDI C IO",
3156 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3157 		.ops = &hsw_power_well_ops,
3158 		.id = DISP_PW_ID_NONE,
3159 		{
3160 			.hsw.regs = &icl_ddi_power_well_regs,
3161 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3162 		},
3163 	},
3164 	{
3165 		.name = "DDI D IO",
3166 		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3167 		.ops = &hsw_power_well_ops,
3168 		.id = DISP_PW_ID_NONE,
3169 		{
3170 			.hsw.regs = &icl_ddi_power_well_regs,
3171 			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3172 		},
3173 	},
3174 	{
3175 		.name = "DDI E IO",
3176 		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
3177 		.ops = &hsw_power_well_ops,
3178 		.id = DISP_PW_ID_NONE,
3179 		{
3180 			.hsw.regs = &icl_ddi_power_well_regs,
3181 			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3182 		},
3183 	},
3184 	{
3185 		.name = "DDI F IO",
3186 		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
3187 		.ops = &hsw_power_well_ops,
3188 		.id = DISP_PW_ID_NONE,
3189 		{
3190 			.hsw.regs = &icl_ddi_power_well_regs,
3191 			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3192 		},
3193 	},
3194 	{
3195 		.name = "AUX A",
3196 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3197 		.ops = &icl_combo_phy_aux_power_well_ops,
3198 		.id = DISP_PW_ID_NONE,
3199 		{
3200 			.hsw.regs = &icl_aux_power_well_regs,
3201 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3202 		},
3203 	},
3204 	{
3205 		.name = "AUX B",
3206 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3207 		.ops = &icl_combo_phy_aux_power_well_ops,
3208 		.id = DISP_PW_ID_NONE,
3209 		{
3210 			.hsw.regs = &icl_aux_power_well_regs,
3211 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3212 		},
3213 	},
3214 	{
3215 		.name = "AUX C",
3216 		.domains = ICL_AUX_C_IO_POWER_DOMAINS,
3217 		.ops = &icl_tc_phy_aux_power_well_ops,
3218 		.id = DISP_PW_ID_NONE,
3219 		{
3220 			.hsw.regs = &icl_aux_power_well_regs,
3221 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3222 			.hsw.is_tc_tbt = false,
3223 		},
3224 	},
3225 	{
3226 		.name = "AUX D",
3227 		.domains = ICL_AUX_D_IO_POWER_DOMAINS,
3228 		.ops = &icl_tc_phy_aux_power_well_ops,
3229 		.id = DISP_PW_ID_NONE,
3230 		{
3231 			.hsw.regs = &icl_aux_power_well_regs,
3232 			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3233 			.hsw.is_tc_tbt = false,
3234 		},
3235 	},
3236 	{
3237 		.name = "AUX E",
3238 		.domains = ICL_AUX_E_IO_POWER_DOMAINS,
3239 		.ops = &icl_tc_phy_aux_power_well_ops,
3240 		.id = DISP_PW_ID_NONE,
3241 		{
3242 			.hsw.regs = &icl_aux_power_well_regs,
3243 			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3244 			.hsw.is_tc_tbt = false,
3245 		},
3246 	},
3247 	{
3248 		.name = "AUX F",
3249 		.domains = ICL_AUX_F_IO_POWER_DOMAINS,
3250 		.ops = &icl_tc_phy_aux_power_well_ops,
3251 		.id = DISP_PW_ID_NONE,
3252 		{
3253 			.hsw.regs = &icl_aux_power_well_regs,
3254 			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3255 			.hsw.is_tc_tbt = false,
3256 		},
3257 	},
3258 	{
3259 		.name = "AUX TBT1",
3260 		.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
3261 		.ops = &icl_tc_phy_aux_power_well_ops,
3262 		.id = DISP_PW_ID_NONE,
3263 		{
3264 			.hsw.regs = &icl_aux_power_well_regs,
3265 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3266 			.hsw.is_tc_tbt = true,
3267 		},
3268 	},
3269 	{
3270 		.name = "AUX TBT2",
3271 		.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
3272 		.ops = &icl_tc_phy_aux_power_well_ops,
3273 		.id = DISP_PW_ID_NONE,
3274 		{
3275 			.hsw.regs = &icl_aux_power_well_regs,
3276 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3277 			.hsw.is_tc_tbt = true,
3278 		},
3279 	},
3280 	{
3281 		.name = "AUX TBT3",
3282 		.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
3283 		.ops = &icl_tc_phy_aux_power_well_ops,
3284 		.id = DISP_PW_ID_NONE,
3285 		{
3286 			.hsw.regs = &icl_aux_power_well_regs,
3287 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3288 			.hsw.is_tc_tbt = true,
3289 		},
3290 	},
3291 	{
3292 		.name = "AUX TBT4",
3293 		.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
3294 		.ops = &icl_tc_phy_aux_power_well_ops,
3295 		.id = DISP_PW_ID_NONE,
3296 		{
3297 			.hsw.regs = &icl_aux_power_well_regs,
3298 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3299 			.hsw.is_tc_tbt = true,
3300 		},
3301 	},
3302 	{
3303 		.name = "power well 4",
3304 		.domains = ICL_PW_4_POWER_DOMAINS,
3305 		.ops = &hsw_power_well_ops,
3306 		.id = DISP_PW_ID_NONE,
3307 		{
3308 			.hsw.regs = &hsw_power_well_regs,
3309 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3310 			.hsw.has_fuses = true,
3311 			.hsw.irq_pipe_mask = BIT(PIPE_C),
3312 		},
3313 	},
3314 };
3315 
3316 static int
3317 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
3318 				   int disable_power_well)
3319 {
3320 	if (disable_power_well >= 0)
3321 		return !!disable_power_well;
3322 
3323 	return 1;
3324 }
3325 
3326 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3327 			       int enable_dc)
3328 {
3329 	u32 mask;
3330 	int requested_dc;
3331 	int max_dc;
3332 
3333 	if (INTEL_GEN(dev_priv) >= 11) {
3334 		max_dc = 2;
3335 		/*
3336 		 * DC9 has a separate HW flow from the rest of the DC states,
3337 		 * not depending on the DMC firmware. It's needed by system
3338 		 * suspend/resume, so allow it unconditionally.
3339 		 */
3340 		mask = DC_STATE_EN_DC9;
3341 	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
3342 		max_dc = 2;
3343 		mask = 0;
3344 	} else if (IS_GEN9_LP(dev_priv)) {
3345 		max_dc = 1;
3346 		mask = DC_STATE_EN_DC9;
3347 	} else {
3348 		max_dc = 0;
3349 		mask = 0;
3350 	}
3351 
3352 	if (!i915_modparams.disable_power_well)
3353 		max_dc = 0;
3354 
3355 	if (enable_dc >= 0 && enable_dc <= max_dc) {
3356 		requested_dc = enable_dc;
3357 	} else if (enable_dc == -1) {
3358 		requested_dc = max_dc;
3359 	} else if (enable_dc > max_dc && enable_dc <= 2) {
3360 		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3361 			      enable_dc, max_dc);
3362 		requested_dc = max_dc;
3363 	} else {
3364 		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3365 		requested_dc = max_dc;
3366 	}
3367 
3368 	if (requested_dc > 1)
3369 		mask |= DC_STATE_EN_UPTO_DC6;
3370 	if (requested_dc > 0)
3371 		mask |= DC_STATE_EN_UPTO_DC5;
3372 
3373 	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
3374 
3375 	return mask;
3376 }
3377 
3378 static int
3379 __set_power_wells(struct i915_power_domains *power_domains,
3380 		  const struct i915_power_well_desc *power_well_descs,
3381 		  int power_well_count)
3382 {
3383 	u64 power_well_ids = 0;
3384 	int i;
3385 
3386 	power_domains->power_well_count = power_well_count;
3387 	power_domains->power_wells =
3388 				kcalloc(power_well_count,
3389 					sizeof(*power_domains->power_wells),
3390 					GFP_KERNEL);
3391 	if (!power_domains->power_wells)
3392 		return -ENOMEM;
3393 
3394 	for (i = 0; i < power_well_count; i++) {
3395 		enum i915_power_well_id id = power_well_descs[i].id;
3396 
3397 		power_domains->power_wells[i].desc = &power_well_descs[i];
3398 
3399 		if (id == DISP_PW_ID_NONE)
3400 			continue;
3401 
3402 		WARN_ON(id >= sizeof(power_well_ids) * 8);
3403 		WARN_ON(power_well_ids & BIT_ULL(id));
3404 		power_well_ids |= BIT_ULL(id);
3405 	}
3406 
3407 	return 0;
3408 }
3409 
3410 #define set_power_wells(power_domains, __power_well_descs) \
3411 	__set_power_wells(power_domains, __power_well_descs, \
3412 			  ARRAY_SIZE(__power_well_descs))
3413 
3414 /**
3415  * intel_power_domains_init - initializes the power domain structures
3416  * @dev_priv: i915 device instance
3417  *
3418  * Initializes the power domain structures for @dev_priv depending upon the
3419  * supported platform.
3420  */
3421 int intel_power_domains_init(struct drm_i915_private *dev_priv)
3422 {
3423 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3424 	int err;
3425 
3426 	i915_modparams.disable_power_well =
3427 		sanitize_disable_power_well_option(dev_priv,
3428 						   i915_modparams.disable_power_well);
3429 	dev_priv->csr.allowed_dc_mask =
3430 		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
3431 
3432 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
3433 
3434 	mutex_init(&power_domains->lock);
3435 
3436 	/*
3437 	 * The enabling order will be from lower to higher indexed wells,
3438 	 * the disabling order is reversed.
3439 	 */
3440 	if (IS_GEN(dev_priv, 11)) {
3441 		err = set_power_wells(power_domains, icl_power_wells);
3442 	} else if (IS_CANNONLAKE(dev_priv)) {
3443 		err = set_power_wells(power_domains, cnl_power_wells);
3444 
3445 		/*
3446 		 * DDI and Aux IO are getting enabled for all ports
3447 		 * regardless the presence or use. So, in order to avoid
3448 		 * timeouts, lets remove them from the list
3449 		 * for the SKUs without port F.
3450 		 */
3451 		if (!IS_CNL_WITH_PORT_F(dev_priv))
3452 			power_domains->power_well_count -= 2;
3453 	} else if (IS_GEMINILAKE(dev_priv)) {
3454 		err = set_power_wells(power_domains, glk_power_wells);
3455 	} else if (IS_BROXTON(dev_priv)) {
3456 		err = set_power_wells(power_domains, bxt_power_wells);
3457 	} else if (IS_GEN9_BC(dev_priv)) {
3458 		err = set_power_wells(power_domains, skl_power_wells);
3459 	} else if (IS_CHERRYVIEW(dev_priv)) {
3460 		err = set_power_wells(power_domains, chv_power_wells);
3461 	} else if (IS_BROADWELL(dev_priv)) {
3462 		err = set_power_wells(power_domains, bdw_power_wells);
3463 	} else if (IS_HASWELL(dev_priv)) {
3464 		err = set_power_wells(power_domains, hsw_power_wells);
3465 	} else if (IS_VALLEYVIEW(dev_priv)) {
3466 		err = set_power_wells(power_domains, vlv_power_wells);
3467 	} else if (IS_I830(dev_priv)) {
3468 		err = set_power_wells(power_domains, i830_power_wells);
3469 	} else {
3470 		err = set_power_wells(power_domains, i9xx_always_on_power_well);
3471 	}
3472 
3473 	return err;
3474 }
3475 
3476 /**
3477  * intel_power_domains_cleanup - clean up power domains resources
3478  * @dev_priv: i915 device instance
3479  *
3480  * Release any resources acquired by intel_power_domains_init()
3481  */
3482 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
3483 {
3484 	kfree(dev_priv->power_domains.power_wells);
3485 }
3486 
3487 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
3488 {
3489 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3490 	struct i915_power_well *power_well;
3491 
3492 	mutex_lock(&power_domains->lock);
3493 	for_each_power_well(dev_priv, power_well) {
3494 		power_well->desc->ops->sync_hw(dev_priv, power_well);
3495 		power_well->hw_enabled =
3496 			power_well->desc->ops->is_enabled(dev_priv, power_well);
3497 	}
3498 	mutex_unlock(&power_domains->lock);
3499 }
3500 
3501 static inline
3502 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
3503 			  i915_reg_t reg, bool enable)
3504 {
3505 	u32 val, status;
3506 
3507 	val = I915_READ(reg);
3508 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
3509 	I915_WRITE(reg, val);
3510 	POSTING_READ(reg);
3511 	udelay(10);
3512 
3513 	status = I915_READ(reg) & DBUF_POWER_STATE;
3514 	if ((enable && !status) || (!enable && status)) {
3515 		DRM_ERROR("DBus power %s timeout!\n",
3516 			  enable ? "enable" : "disable");
3517 		return false;
3518 	}
3519 	return true;
3520 }
3521 
3522 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
3523 {
3524 	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
3525 }
3526 
3527 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
3528 {
3529 	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
3530 }
3531 
3532 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3533 {
3534 	if (INTEL_GEN(dev_priv) < 11)
3535 		return 1;
3536 	return 2;
3537 }
3538 
3539 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3540 			    u8 req_slices)
3541 {
3542 	const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
3543 	bool ret;
3544 
3545 	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
3546 		DRM_ERROR("Invalid number of dbuf slices requested\n");
3547 		return;
3548 	}
3549 
3550 	if (req_slices == hw_enabled_slices || req_slices == 0)
3551 		return;
3552 
3553 	if (req_slices > hw_enabled_slices)
3554 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3555 	else
3556 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3557 
3558 	if (ret)
3559 		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
3560 }
3561 
3562 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
3563 {
3564 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
3565 	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
3566 	POSTING_READ(DBUF_CTL_S2);
3567 
3568 	udelay(10);
3569 
3570 	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3571 	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3572 		DRM_ERROR("DBuf power enable timeout\n");
3573 	else
3574 		/*
3575 		 * FIXME: for now pretend that we only have 1 slice, see
3576 		 * intel_enabled_dbuf_slices_num().
3577 		 */
3578 		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
3579 }
3580 
3581 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
3582 {
3583 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
3584 	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
3585 	POSTING_READ(DBUF_CTL_S2);
3586 
3587 	udelay(10);
3588 
3589 	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3590 	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3591 		DRM_ERROR("DBuf power disable timeout!\n");
3592 	else
3593 		/*
3594 		 * FIXME: for now pretend that the first slice is always
3595 		 * enabled, see intel_enabled_dbuf_slices_num().
3596 		 */
3597 		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
3598 }
3599 
3600 static void icl_mbus_init(struct drm_i915_private *dev_priv)
3601 {
3602 	u32 val;
3603 
3604 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
3605 	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
3606 	      MBUS_ABOX_B_CREDIT(1) |
3607 	      MBUS_ABOX_BW_CREDIT(1);
3608 
3609 	I915_WRITE(MBUS_ABOX_CTL, val);
3610 }
3611 
3612 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
3613 				      bool enable)
3614 {
3615 	i915_reg_t reg;
3616 	u32 reset_bits, val;
3617 
3618 	if (IS_IVYBRIDGE(dev_priv)) {
3619 		reg = GEN7_MSG_CTL;
3620 		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
3621 	} else {
3622 		reg = HSW_NDE_RSTWRN_OPT;
3623 		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
3624 	}
3625 
3626 	val = I915_READ(reg);
3627 
3628 	if (enable)
3629 		val |= reset_bits;
3630 	else
3631 		val &= ~reset_bits;
3632 
3633 	I915_WRITE(reg, val);
3634 }
3635 
3636 static void skl_display_core_init(struct drm_i915_private *dev_priv,
3637 				   bool resume)
3638 {
3639 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3640 	struct i915_power_well *well;
3641 
3642 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3643 
3644 	/* enable PCH reset handshake */
3645 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3646 
3647 	/* enable PG1 and Misc I/O */
3648 	mutex_lock(&power_domains->lock);
3649 
3650 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3651 	intel_power_well_enable(dev_priv, well);
3652 
3653 	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
3654 	intel_power_well_enable(dev_priv, well);
3655 
3656 	mutex_unlock(&power_domains->lock);
3657 
3658 	intel_cdclk_init(dev_priv);
3659 
3660 	gen9_dbuf_enable(dev_priv);
3661 
3662 	if (resume && dev_priv->csr.dmc_payload)
3663 		intel_csr_load_program(dev_priv);
3664 }
3665 
3666 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
3667 {
3668 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3669 	struct i915_power_well *well;
3670 
3671 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3672 
3673 	gen9_dbuf_disable(dev_priv);
3674 
3675 	intel_cdclk_uninit(dev_priv);
3676 
3677 	/* The spec doesn't call for removing the reset handshake flag */
3678 	/* disable PG1 and Misc I/O */
3679 
3680 	mutex_lock(&power_domains->lock);
3681 
3682 	/*
3683 	 * BSpec says to keep the MISC IO power well enabled here, only
3684 	 * remove our request for power well 1.
3685 	 * Note that even though the driver's request is removed power well 1
3686 	 * may stay enabled after this due to DMC's own request on it.
3687 	 */
3688 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3689 	intel_power_well_disable(dev_priv, well);
3690 
3691 	mutex_unlock(&power_domains->lock);
3692 
3693 	usleep_range(10, 30);		/* 10 us delay per Bspec */
3694 }
3695 
3696 void bxt_display_core_init(struct drm_i915_private *dev_priv,
3697 			   bool resume)
3698 {
3699 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3700 	struct i915_power_well *well;
3701 
3702 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3703 
3704 	/*
3705 	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
3706 	 * or else the reset will hang because there is no PCH to respond.
3707 	 * Move the handshake programming to initialization sequence.
3708 	 * Previously was left up to BIOS.
3709 	 */
3710 	intel_pch_reset_handshake(dev_priv, false);
3711 
3712 	/* Enable PG1 */
3713 	mutex_lock(&power_domains->lock);
3714 
3715 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3716 	intel_power_well_enable(dev_priv, well);
3717 
3718 	mutex_unlock(&power_domains->lock);
3719 
3720 	intel_cdclk_init(dev_priv);
3721 
3722 	gen9_dbuf_enable(dev_priv);
3723 
3724 	if (resume && dev_priv->csr.dmc_payload)
3725 		intel_csr_load_program(dev_priv);
3726 }
3727 
3728 void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
3729 {
3730 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3731 	struct i915_power_well *well;
3732 
3733 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3734 
3735 	gen9_dbuf_disable(dev_priv);
3736 
3737 	intel_cdclk_uninit(dev_priv);
3738 
3739 	/* The spec doesn't call for removing the reset handshake flag */
3740 
3741 	/*
3742 	 * Disable PW1 (PG1).
3743 	 * Note that even though the driver's request is removed power well 1
3744 	 * may stay enabled after this due to DMC's own request on it.
3745 	 */
3746 	mutex_lock(&power_domains->lock);
3747 
3748 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3749 	intel_power_well_disable(dev_priv, well);
3750 
3751 	mutex_unlock(&power_domains->lock);
3752 
3753 	usleep_range(10, 30);		/* 10 us delay per Bspec */
3754 }
3755 
3756 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
3757 {
3758 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3759 	struct i915_power_well *well;
3760 
3761 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3762 
3763 	/* 1. Enable PCH Reset Handshake */
3764 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3765 
3766 	/* 2-3. */
3767 	cnl_combo_phys_init(dev_priv);
3768 
3769 	/*
3770 	 * 4. Enable Power Well 1 (PG1).
3771 	 *    The AUX IO power wells will be enabled on demand.
3772 	 */
3773 	mutex_lock(&power_domains->lock);
3774 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3775 	intel_power_well_enable(dev_priv, well);
3776 	mutex_unlock(&power_domains->lock);
3777 
3778 	/* 5. Enable CD clock */
3779 	intel_cdclk_init(dev_priv);
3780 
3781 	/* 6. Enable DBUF */
3782 	gen9_dbuf_enable(dev_priv);
3783 
3784 	if (resume && dev_priv->csr.dmc_payload)
3785 		intel_csr_load_program(dev_priv);
3786 }
3787 
3788 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
3789 {
3790 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3791 	struct i915_power_well *well;
3792 
3793 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3794 
3795 	/* 1. Disable all display engine functions -> aready done */
3796 
3797 	/* 2. Disable DBUF */
3798 	gen9_dbuf_disable(dev_priv);
3799 
3800 	/* 3. Disable CD clock */
3801 	intel_cdclk_uninit(dev_priv);
3802 
3803 	/*
3804 	 * 4. Disable Power Well 1 (PG1).
3805 	 *    The AUX IO power wells are toggled on demand, so they are already
3806 	 *    disabled at this point.
3807 	 */
3808 	mutex_lock(&power_domains->lock);
3809 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3810 	intel_power_well_disable(dev_priv, well);
3811 	mutex_unlock(&power_domains->lock);
3812 
3813 	usleep_range(10, 30);		/* 10 us delay per Bspec */
3814 
3815 	/* 5. */
3816 	cnl_combo_phys_uninit(dev_priv);
3817 }
3818 
3819 void icl_display_core_init(struct drm_i915_private *dev_priv,
3820 			   bool resume)
3821 {
3822 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3823 	struct i915_power_well *well;
3824 
3825 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3826 
3827 	/* 1. Enable PCH reset handshake. */
3828 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3829 
3830 	/* 2-3. */
3831 	icl_combo_phys_init(dev_priv);
3832 
3833 	/*
3834 	 * 4. Enable Power Well 1 (PG1).
3835 	 *    The AUX IO power wells will be enabled on demand.
3836 	 */
3837 	mutex_lock(&power_domains->lock);
3838 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3839 	intel_power_well_enable(dev_priv, well);
3840 	mutex_unlock(&power_domains->lock);
3841 
3842 	/* 5. Enable CDCLK. */
3843 	intel_cdclk_init(dev_priv);
3844 
3845 	/* 6. Enable DBUF. */
3846 	icl_dbuf_enable(dev_priv);
3847 
3848 	/* 7. Setup MBUS. */
3849 	icl_mbus_init(dev_priv);
3850 
3851 	if (resume && dev_priv->csr.dmc_payload)
3852 		intel_csr_load_program(dev_priv);
3853 }
3854 
3855 void icl_display_core_uninit(struct drm_i915_private *dev_priv)
3856 {
3857 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3858 	struct i915_power_well *well;
3859 
3860 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3861 
3862 	/* 1. Disable all display engine functions -> aready done */
3863 
3864 	/* 2. Disable DBUF */
3865 	icl_dbuf_disable(dev_priv);
3866 
3867 	/* 3. Disable CD clock */
3868 	intel_cdclk_uninit(dev_priv);
3869 
3870 	/*
3871 	 * 4. Disable Power Well 1 (PG1).
3872 	 *    The AUX IO power wells are toggled on demand, so they are already
3873 	 *    disabled at this point.
3874 	 */
3875 	mutex_lock(&power_domains->lock);
3876 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3877 	intel_power_well_disable(dev_priv, well);
3878 	mutex_unlock(&power_domains->lock);
3879 
3880 	/* 5. */
3881 	icl_combo_phys_uninit(dev_priv);
3882 }
3883 
3884 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
3885 {
3886 	struct i915_power_well *cmn_bc =
3887 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
3888 	struct i915_power_well *cmn_d =
3889 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
3890 
3891 	/*
3892 	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
3893 	 * workaround never ever read DISPLAY_PHY_CONTROL, and
3894 	 * instead maintain a shadow copy ourselves. Use the actual
3895 	 * power well state and lane status to reconstruct the
3896 	 * expected initial value.
3897 	 */
3898 	dev_priv->chv_phy_control =
3899 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
3900 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
3901 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
3902 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
3903 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
3904 
3905 	/*
3906 	 * If all lanes are disabled we leave the override disabled
3907 	 * with all power down bits cleared to match the state we
3908 	 * would use after disabling the port. Otherwise enable the
3909 	 * override and set the lane powerdown bits accding to the
3910 	 * current lane status.
3911 	 */
3912 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
3913 		u32 status = I915_READ(DPLL(PIPE_A));
3914 		unsigned int mask;
3915 
3916 		mask = status & DPLL_PORTB_READY_MASK;
3917 		if (mask == 0xf)
3918 			mask = 0x0;
3919 		else
3920 			dev_priv->chv_phy_control |=
3921 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
3922 
3923 		dev_priv->chv_phy_control |=
3924 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
3925 
3926 		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
3927 		if (mask == 0xf)
3928 			mask = 0x0;
3929 		else
3930 			dev_priv->chv_phy_control |=
3931 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
3932 
3933 		dev_priv->chv_phy_control |=
3934 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
3935 
3936 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
3937 
3938 		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
3939 	} else {
3940 		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
3941 	}
3942 
3943 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
3944 		u32 status = I915_READ(DPIO_PHY_STATUS);
3945 		unsigned int mask;
3946 
3947 		mask = status & DPLL_PORTD_READY_MASK;
3948 
3949 		if (mask == 0xf)
3950 			mask = 0x0;
3951 		else
3952 			dev_priv->chv_phy_control |=
3953 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
3954 
3955 		dev_priv->chv_phy_control |=
3956 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
3957 
3958 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
3959 
3960 		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
3961 	} else {
3962 		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
3963 	}
3964 
3965 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
3966 
3967 	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
3968 		      dev_priv->chv_phy_control);
3969 }
3970 
3971 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
3972 {
3973 	struct i915_power_well *cmn =
3974 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
3975 	struct i915_power_well *disp2d =
3976 		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
3977 
3978 	/* If the display might be already active skip this */
3979 	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
3980 	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
3981 	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
3982 		return;
3983 
3984 	DRM_DEBUG_KMS("toggling display PHY side reset\n");
3985 
3986 	/* cmnlane needs DPLL registers */
3987 	disp2d->desc->ops->enable(dev_priv, disp2d);
3988 
3989 	/*
3990 	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
3991 	 * Need to assert and de-assert PHY SB reset by gating the
3992 	 * common lane power, then un-gating it.
3993 	 * Simply ungating isn't enough to reset the PHY enough to get
3994 	 * ports and lanes running.
3995 	 */
3996 	cmn->desc->ops->disable(dev_priv, cmn);
3997 }
3998 
3999 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
4000 {
4001 	bool ret;
4002 
4003 	mutex_lock(&dev_priv->pcu_lock);
4004 	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
4005 	mutex_unlock(&dev_priv->pcu_lock);
4006 
4007 	return ret;
4008 }
4009 
4010 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
4011 {
4012 	WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
4013 	     "VED not power gated\n");
4014 }
4015 
4016 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
4017 {
4018 	static const struct pci_device_id isp_ids[] = {
4019 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
4020 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
4021 		{}
4022 	};
4023 
4024 	WARN(!pci_dev_present(isp_ids) &&
4025 	     !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
4026 	     "ISP not power gated\n");
4027 }
4028 
4029 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
4030 
4031 /**
4032  * intel_power_domains_init_hw - initialize hardware power domain state
4033  * @i915: i915 device instance
4034  * @resume: Called from resume code paths or not
4035  *
4036  * This function initializes the hardware power domain state and enables all
4037  * power wells belonging to the INIT power domain. Power wells in other
4038  * domains (and not in the INIT domain) are referenced or disabled by
4039  * intel_modeset_readout_hw_state(). After that the reference count of each
4040  * power well must match its HW enabled state, see
4041  * intel_power_domains_verify_state().
4042  *
4043  * It will return with power domains disabled (to be enabled later by
4044  * intel_power_domains_enable()) and must be paired with
4045  * intel_power_domains_fini_hw().
4046  */
4047 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
4048 {
4049 	struct i915_power_domains *power_domains = &i915->power_domains;
4050 
4051 	power_domains->initializing = true;
4052 
4053 	if (INTEL_GEN(i915) >= 11) {
4054 		icl_display_core_init(i915, resume);
4055 	} else if (IS_CANNONLAKE(i915)) {
4056 		cnl_display_core_init(i915, resume);
4057 	} else if (IS_GEN9_BC(i915)) {
4058 		skl_display_core_init(i915, resume);
4059 	} else if (IS_GEN9_LP(i915)) {
4060 		bxt_display_core_init(i915, resume);
4061 	} else if (IS_CHERRYVIEW(i915)) {
4062 		mutex_lock(&power_domains->lock);
4063 		chv_phy_control_init(i915);
4064 		mutex_unlock(&power_domains->lock);
4065 		assert_isp_power_gated(i915);
4066 	} else if (IS_VALLEYVIEW(i915)) {
4067 		mutex_lock(&power_domains->lock);
4068 		vlv_cmnlane_wa(i915);
4069 		mutex_unlock(&power_domains->lock);
4070 		assert_ved_power_gated(i915);
4071 		assert_isp_power_gated(i915);
4072 	} else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
4073 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4074 	}
4075 
4076 	/*
4077 	 * Keep all power wells enabled for any dependent HW access during
4078 	 * initialization and to make sure we keep BIOS enabled display HW
4079 	 * resources powered until display HW readout is complete. We drop
4080 	 * this reference in intel_power_domains_enable().
4081 	 */
4082 	power_domains->wakeref =
4083 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
4084 
4085 	/* Disable power support if the user asked so. */
4086 	if (!i915_modparams.disable_power_well)
4087 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
4088 	intel_power_domains_sync_hw(i915);
4089 
4090 	power_domains->initializing = false;
4091 }
4092 
4093 /**
4094  * intel_power_domains_fini_hw - deinitialize hw power domain state
4095  * @i915: i915 device instance
4096  *
4097  * De-initializes the display power domain HW state. It also ensures that the
4098  * device stays powered up so that the driver can be reloaded.
4099  *
4100  * It must be called with power domains already disabled (after a call to
4101  * intel_power_domains_disable()) and must be paired with
4102  * intel_power_domains_init_hw().
4103  */
4104 void intel_power_domains_fini_hw(struct drm_i915_private *i915)
4105 {
4106 	intel_wakeref_t wakeref __maybe_unused =
4107 		fetch_and_zero(&i915->power_domains.wakeref);
4108 
4109 	/* Remove the refcount we took to keep power well support disabled. */
4110 	if (!i915_modparams.disable_power_well)
4111 		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4112 
4113 	intel_power_domains_verify_state(i915);
4114 
4115 	/* Keep the power well enabled, but cancel its rpm wakeref. */
4116 	intel_runtime_pm_put(i915, wakeref);
4117 }
4118 
4119 /**
4120  * intel_power_domains_enable - enable toggling of display power wells
4121  * @i915: i915 device instance
4122  *
4123  * Enable the ondemand enabling/disabling of the display power wells. Note that
4124  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
4125  * only at specific points of the display modeset sequence, thus they are not
4126  * affected by the intel_power_domains_enable()/disable() calls. The purpose
4127  * of these function is to keep the rest of power wells enabled until the end
4128  * of display HW readout (which will acquire the power references reflecting
4129  * the current HW state).
4130  */
4131 void intel_power_domains_enable(struct drm_i915_private *i915)
4132 {
4133 	intel_wakeref_t wakeref __maybe_unused =
4134 		fetch_and_zero(&i915->power_domains.wakeref);
4135 
4136 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4137 	intel_power_domains_verify_state(i915);
4138 }
4139 
4140 /**
4141  * intel_power_domains_disable - disable toggling of display power wells
4142  * @i915: i915 device instance
4143  *
4144  * Disable the ondemand enabling/disabling of the display power wells. See
4145  * intel_power_domains_enable() for which power wells this call controls.
4146  */
4147 void intel_power_domains_disable(struct drm_i915_private *i915)
4148 {
4149 	struct i915_power_domains *power_domains = &i915->power_domains;
4150 
4151 	WARN_ON(power_domains->wakeref);
4152 	power_domains->wakeref =
4153 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
4154 
4155 	intel_power_domains_verify_state(i915);
4156 }
4157 
4158 /**
4159  * intel_power_domains_suspend - suspend power domain state
4160  * @i915: i915 device instance
4161  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
4162  *
4163  * This function prepares the hardware power domain state before entering
4164  * system suspend.
4165  *
4166  * It must be called with power domains already disabled (after a call to
4167  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
4168  */
4169 void intel_power_domains_suspend(struct drm_i915_private *i915,
4170 				 enum i915_drm_suspend_mode suspend_mode)
4171 {
4172 	struct i915_power_domains *power_domains = &i915->power_domains;
4173 	intel_wakeref_t wakeref __maybe_unused =
4174 		fetch_and_zero(&power_domains->wakeref);
4175 
4176 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4177 
4178 	/*
4179 	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
4180 	 * support don't manually deinit the power domains. This also means the
4181 	 * CSR/DMC firmware will stay active, it will power down any HW
4182 	 * resources as required and also enable deeper system power states
4183 	 * that would be blocked if the firmware was inactive.
4184 	 */
4185 	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
4186 	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
4187 	    i915->csr.dmc_payload) {
4188 		intel_power_domains_verify_state(i915);
4189 		return;
4190 	}
4191 
4192 	/*
4193 	 * Even if power well support was disabled we still want to disable
4194 	 * power wells if power domains must be deinitialized for suspend.
4195 	 */
4196 	if (!i915_modparams.disable_power_well) {
4197 		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4198 		intel_power_domains_verify_state(i915);
4199 	}
4200 
4201 	if (INTEL_GEN(i915) >= 11)
4202 		icl_display_core_uninit(i915);
4203 	else if (IS_CANNONLAKE(i915))
4204 		cnl_display_core_uninit(i915);
4205 	else if (IS_GEN9_BC(i915))
4206 		skl_display_core_uninit(i915);
4207 	else if (IS_GEN9_LP(i915))
4208 		bxt_display_core_uninit(i915);
4209 
4210 	power_domains->display_core_suspended = true;
4211 }
4212 
4213 /**
4214  * intel_power_domains_resume - resume power domain state
4215  * @i915: i915 device instance
4216  *
4217  * This function resume the hardware power domain state during system resume.
4218  *
4219  * It will return with power domain support disabled (to be enabled later by
4220  * intel_power_domains_enable()) and must be paired with
4221  * intel_power_domains_suspend().
4222  */
4223 void intel_power_domains_resume(struct drm_i915_private *i915)
4224 {
4225 	struct i915_power_domains *power_domains = &i915->power_domains;
4226 
4227 	if (power_domains->display_core_suspended) {
4228 		intel_power_domains_init_hw(i915, true);
4229 		power_domains->display_core_suspended = false;
4230 	} else {
4231 		WARN_ON(power_domains->wakeref);
4232 		power_domains->wakeref =
4233 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
4234 	}
4235 
4236 	intel_power_domains_verify_state(i915);
4237 }
4238 
4239 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4240 
4241 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
4242 {
4243 	struct i915_power_domains *power_domains = &i915->power_domains;
4244 	struct i915_power_well *power_well;
4245 
4246 	for_each_power_well(i915, power_well) {
4247 		enum intel_display_power_domain domain;
4248 
4249 		DRM_DEBUG_DRIVER("%-25s %d\n",
4250 				 power_well->desc->name, power_well->count);
4251 
4252 		for_each_power_domain(domain, power_well->desc->domains)
4253 			DRM_DEBUG_DRIVER("  %-23s %d\n",
4254 					 intel_display_power_domain_str(domain),
4255 					 power_domains->domain_use_count[domain]);
4256 	}
4257 }
4258 
4259 /**
4260  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
4261  * @i915: i915 device instance
4262  *
4263  * Verify if the reference count of each power well matches its HW enabled
4264  * state and the total refcount of the domains it belongs to. This must be
4265  * called after modeset HW state sanitization, which is responsible for
4266  * acquiring reference counts for any power wells in use and disabling the
4267  * ones left on by BIOS but not required by any active output.
4268  */
4269 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
4270 {
4271 	struct i915_power_domains *power_domains = &i915->power_domains;
4272 	struct i915_power_well *power_well;
4273 	bool dump_domain_info;
4274 
4275 	mutex_lock(&power_domains->lock);
4276 
4277 	dump_domain_info = false;
4278 	for_each_power_well(i915, power_well) {
4279 		enum intel_display_power_domain domain;
4280 		int domains_count;
4281 		bool enabled;
4282 
4283 		enabled = power_well->desc->ops->is_enabled(i915, power_well);
4284 		if ((power_well->count || power_well->desc->always_on) !=
4285 		    enabled)
4286 			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
4287 				  power_well->desc->name,
4288 				  power_well->count, enabled);
4289 
4290 		domains_count = 0;
4291 		for_each_power_domain(domain, power_well->desc->domains)
4292 			domains_count += power_domains->domain_use_count[domain];
4293 
4294 		if (power_well->count != domains_count) {
4295 			DRM_ERROR("power well %s refcount/domain refcount mismatch "
4296 				  "(refcount %d/domains refcount %d)\n",
4297 				  power_well->desc->name, power_well->count,
4298 				  domains_count);
4299 			dump_domain_info = true;
4300 		}
4301 	}
4302 
4303 	if (dump_domain_info) {
4304 		static bool dumped;
4305 
4306 		if (!dumped) {
4307 			intel_power_domains_dump_info(i915);
4308 			dumped = true;
4309 		}
4310 	}
4311 
4312 	mutex_unlock(&power_domains->lock);
4313 }
4314 
4315 #else
4316 
4317 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
4318 {
4319 }
4320 
4321 #endif
4322 
4323 /**
4324  * intel_runtime_pm_get - grab a runtime pm reference
4325  * @i915: i915 device instance
4326  *
4327  * This function grabs a device-level runtime pm reference (mostly used for GEM
4328  * code to ensure the GTT or GT is on) and ensures that it is powered up.
4329  *
4330  * Any runtime pm reference obtained by this function must have a symmetric
4331  * call to intel_runtime_pm_put() to release the reference again.
4332  *
4333  * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
4334  */
4335 intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
4336 {
4337 	struct pci_dev *pdev = i915->drm.pdev;
4338 	struct device *kdev = &pdev->dev;
4339 	int ret;
4340 
4341 	ret = pm_runtime_get_sync(kdev);
4342 	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4343 
4344 	return track_intel_runtime_pm_wakeref(i915);
4345 }
4346 
4347 /**
4348  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
4349  * @i915: i915 device instance
4350  *
4351  * This function grabs a device-level runtime pm reference if the device is
4352  * already in use and ensures that it is powered up. It is illegal to try
4353  * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
4354  *
4355  * Any runtime pm reference obtained by this function must have a symmetric
4356  * call to intel_runtime_pm_put() to release the reference again.
4357  *
4358  * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
4359  * as True if the wakeref was acquired, or False otherwise.
4360  */
4361 intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
4362 {
4363 	if (IS_ENABLED(CONFIG_PM)) {
4364 		struct pci_dev *pdev = i915->drm.pdev;
4365 		struct device *kdev = &pdev->dev;
4366 
4367 		/*
4368 		 * In cases runtime PM is disabled by the RPM core and we get
4369 		 * an -EINVAL return value we are not supposed to call this
4370 		 * function, since the power state is undefined. This applies
4371 		 * atm to the late/early system suspend/resume handlers.
4372 		 */
4373 		if (pm_runtime_get_if_in_use(kdev) <= 0)
4374 			return 0;
4375 	}
4376 
4377 	return track_intel_runtime_pm_wakeref(i915);
4378 }
4379 
4380 /**
4381  * intel_runtime_pm_get_noresume - grab a runtime pm reference
4382  * @i915: i915 device instance
4383  *
4384  * This function grabs a device-level runtime pm reference (mostly used for GEM
4385  * code to ensure the GTT or GT is on).
4386  *
4387  * It will _not_ power up the device but instead only check that it's powered
4388  * on.  Therefore it is only valid to call this functions from contexts where
4389  * the device is known to be powered up and where trying to power it up would
4390  * result in hilarity and deadlocks. That pretty much means only the system
4391  * suspend/resume code where this is used to grab runtime pm references for
4392  * delayed setup down in work items.
4393  *
4394  * Any runtime pm reference obtained by this function must have a symmetric
4395  * call to intel_runtime_pm_put() to release the reference again.
4396  *
4397  * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
4398  */
4399 intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
4400 {
4401 	struct pci_dev *pdev = i915->drm.pdev;
4402 	struct device *kdev = &pdev->dev;
4403 
4404 	assert_rpm_wakelock_held(i915);
4405 	pm_runtime_get_noresume(kdev);
4406 
4407 	return track_intel_runtime_pm_wakeref(i915);
4408 }
4409 
4410 /**
4411  * intel_runtime_pm_put - release a runtime pm reference
4412  * @i915: i915 device instance
4413  *
4414  * This function drops the device-level runtime pm reference obtained by
4415  * intel_runtime_pm_get() and might power down the corresponding
4416  * hardware block right away if this is the last reference.
4417  */
4418 void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
4419 {
4420 	struct pci_dev *pdev = i915->drm.pdev;
4421 	struct device *kdev = &pdev->dev;
4422 
4423 	untrack_intel_runtime_pm_wakeref(i915);
4424 
4425 	pm_runtime_mark_last_busy(kdev);
4426 	pm_runtime_put_autosuspend(kdev);
4427 }
4428 
4429 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4430 void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
4431 {
4432 	cancel_intel_runtime_pm_wakeref(i915, wref);
4433 	intel_runtime_pm_put_unchecked(i915);
4434 }
4435 #endif
4436 
4437 /**
4438  * intel_runtime_pm_enable - enable runtime pm
4439  * @i915: i915 device instance
4440  *
4441  * This function enables runtime pm at the end of the driver load sequence.
4442  *
4443  * Note that this function does currently not enable runtime pm for the
4444  * subordinate display power domains. That is done by
4445  * intel_power_domains_enable().
4446  */
4447 void intel_runtime_pm_enable(struct drm_i915_private *i915)
4448 {
4449 	struct pci_dev *pdev = i915->drm.pdev;
4450 	struct device *kdev = &pdev->dev;
4451 
4452 	/*
4453 	 * Disable the system suspend direct complete optimization, which can
4454 	 * leave the device suspended skipping the driver's suspend handlers
4455 	 * if the device was already runtime suspended. This is needed due to
4456 	 * the difference in our runtime and system suspend sequence and
4457 	 * becaue the HDA driver may require us to enable the audio power
4458 	 * domain during system suspend.
4459 	 */
4460 	dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
4461 
4462 	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
4463 	pm_runtime_mark_last_busy(kdev);
4464 
4465 	/*
4466 	 * Take a permanent reference to disable the RPM functionality and drop
4467 	 * it only when unloading the driver. Use the low level get/put helpers,
4468 	 * so the driver's own RPM reference tracking asserts also work on
4469 	 * platforms without RPM support.
4470 	 */
4471 	if (!HAS_RUNTIME_PM(i915)) {
4472 		int ret;
4473 
4474 		pm_runtime_dont_use_autosuspend(kdev);
4475 		ret = pm_runtime_get_sync(kdev);
4476 		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4477 	} else {
4478 		pm_runtime_use_autosuspend(kdev);
4479 	}
4480 
4481 	/*
4482 	 * The core calls the driver load handler with an RPM reference held.
4483 	 * We drop that here and will reacquire it during unloading in
4484 	 * intel_power_domains_fini().
4485 	 */
4486 	pm_runtime_put_autosuspend(kdev);
4487 }
4488 
4489 void intel_runtime_pm_disable(struct drm_i915_private *i915)
4490 {
4491 	struct pci_dev *pdev = i915->drm.pdev;
4492 	struct device *kdev = &pdev->dev;
4493 
4494 	/* Transfer rpm ownership back to core */
4495 	WARN(pm_runtime_get_sync(kdev) < 0,
4496 	     "Failed to pass rpm ownership back to core\n");
4497 
4498 	pm_runtime_dont_use_autosuspend(kdev);
4499 
4500 	if (!HAS_RUNTIME_PM(i915))
4501 		pm_runtime_put(kdev);
4502 }
4503 
4504 void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
4505 {
4506 	struct i915_runtime_pm *rpm = &i915->runtime_pm;
4507 	int count;
4508 
4509 	count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
4510 	WARN(count,
4511 	     "i915->runtime_pm.wakeref_count=%d on cleanup\n",
4512 	     count);
4513 
4514 	untrack_intel_runtime_pm_wakeref(i915);
4515 }
4516 
4517 void intel_runtime_pm_init_early(struct drm_i915_private *i915)
4518 {
4519 	init_intel_runtime_pm_wakeref(i915);
4520 }
4521