1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <linux/string_helpers.h>
7 
8 #include "i915_drv.h"
9 #include "i915_irq.h"
10 #include "intel_backlight_regs.h"
11 #include "intel_cdclk.h"
12 #include "intel_combo_phy.h"
13 #include "intel_de.h"
14 #include "intel_display_power.h"
15 #include "intel_display_power_map.h"
16 #include "intel_display_power_well.h"
17 #include "intel_display_types.h"
18 #include "intel_dmc.h"
19 #include "intel_mchbar_regs.h"
20 #include "intel_pch_refclk.h"
21 #include "intel_pcode.h"
22 #include "intel_snps_phy.h"
23 #include "skl_watermark.h"
24 #include "vlv_sideband.h"
25 
26 #define for_each_power_domain_well(__dev_priv, __power_well, __domain)	\
27 	for_each_power_well(__dev_priv, __power_well)				\
28 		for_each_if(test_bit((__domain), (__power_well)->domains.bits))
29 
30 #define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
31 	for_each_power_well_reverse(__dev_priv, __power_well)		        \
32 		for_each_if(test_bit((__domain), (__power_well)->domains.bits))
33 
34 const char *
35 intel_display_power_domain_str(enum intel_display_power_domain domain)
36 {
37 	switch (domain) {
38 	case POWER_DOMAIN_DISPLAY_CORE:
39 		return "DISPLAY_CORE";
40 	case POWER_DOMAIN_PIPE_A:
41 		return "PIPE_A";
42 	case POWER_DOMAIN_PIPE_B:
43 		return "PIPE_B";
44 	case POWER_DOMAIN_PIPE_C:
45 		return "PIPE_C";
46 	case POWER_DOMAIN_PIPE_D:
47 		return "PIPE_D";
48 	case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
49 		return "PIPE_PANEL_FITTER_A";
50 	case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
51 		return "PIPE_PANEL_FITTER_B";
52 	case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
53 		return "PIPE_PANEL_FITTER_C";
54 	case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
55 		return "PIPE_PANEL_FITTER_D";
56 	case POWER_DOMAIN_TRANSCODER_A:
57 		return "TRANSCODER_A";
58 	case POWER_DOMAIN_TRANSCODER_B:
59 		return "TRANSCODER_B";
60 	case POWER_DOMAIN_TRANSCODER_C:
61 		return "TRANSCODER_C";
62 	case POWER_DOMAIN_TRANSCODER_D:
63 		return "TRANSCODER_D";
64 	case POWER_DOMAIN_TRANSCODER_EDP:
65 		return "TRANSCODER_EDP";
66 	case POWER_DOMAIN_TRANSCODER_DSI_A:
67 		return "TRANSCODER_DSI_A";
68 	case POWER_DOMAIN_TRANSCODER_DSI_C:
69 		return "TRANSCODER_DSI_C";
70 	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
71 		return "TRANSCODER_VDSC_PW2";
72 	case POWER_DOMAIN_PORT_DDI_LANES_A:
73 		return "PORT_DDI_LANES_A";
74 	case POWER_DOMAIN_PORT_DDI_LANES_B:
75 		return "PORT_DDI_LANES_B";
76 	case POWER_DOMAIN_PORT_DDI_LANES_C:
77 		return "PORT_DDI_LANES_C";
78 	case POWER_DOMAIN_PORT_DDI_LANES_D:
79 		return "PORT_DDI_LANES_D";
80 	case POWER_DOMAIN_PORT_DDI_LANES_E:
81 		return "PORT_DDI_LANES_E";
82 	case POWER_DOMAIN_PORT_DDI_LANES_F:
83 		return "PORT_DDI_LANES_F";
84 	case POWER_DOMAIN_PORT_DDI_LANES_TC1:
85 		return "PORT_DDI_LANES_TC1";
86 	case POWER_DOMAIN_PORT_DDI_LANES_TC2:
87 		return "PORT_DDI_LANES_TC2";
88 	case POWER_DOMAIN_PORT_DDI_LANES_TC3:
89 		return "PORT_DDI_LANES_TC3";
90 	case POWER_DOMAIN_PORT_DDI_LANES_TC4:
91 		return "PORT_DDI_LANES_TC4";
92 	case POWER_DOMAIN_PORT_DDI_LANES_TC5:
93 		return "PORT_DDI_LANES_TC5";
94 	case POWER_DOMAIN_PORT_DDI_LANES_TC6:
95 		return "PORT_DDI_LANES_TC6";
96 	case POWER_DOMAIN_PORT_DDI_IO_A:
97 		return "PORT_DDI_IO_A";
98 	case POWER_DOMAIN_PORT_DDI_IO_B:
99 		return "PORT_DDI_IO_B";
100 	case POWER_DOMAIN_PORT_DDI_IO_C:
101 		return "PORT_DDI_IO_C";
102 	case POWER_DOMAIN_PORT_DDI_IO_D:
103 		return "PORT_DDI_IO_D";
104 	case POWER_DOMAIN_PORT_DDI_IO_E:
105 		return "PORT_DDI_IO_E";
106 	case POWER_DOMAIN_PORT_DDI_IO_F:
107 		return "PORT_DDI_IO_F";
108 	case POWER_DOMAIN_PORT_DDI_IO_TC1:
109 		return "PORT_DDI_IO_TC1";
110 	case POWER_DOMAIN_PORT_DDI_IO_TC2:
111 		return "PORT_DDI_IO_TC2";
112 	case POWER_DOMAIN_PORT_DDI_IO_TC3:
113 		return "PORT_DDI_IO_TC3";
114 	case POWER_DOMAIN_PORT_DDI_IO_TC4:
115 		return "PORT_DDI_IO_TC4";
116 	case POWER_DOMAIN_PORT_DDI_IO_TC5:
117 		return "PORT_DDI_IO_TC5";
118 	case POWER_DOMAIN_PORT_DDI_IO_TC6:
119 		return "PORT_DDI_IO_TC6";
120 	case POWER_DOMAIN_PORT_DSI:
121 		return "PORT_DSI";
122 	case POWER_DOMAIN_PORT_CRT:
123 		return "PORT_CRT";
124 	case POWER_DOMAIN_PORT_OTHER:
125 		return "PORT_OTHER";
126 	case POWER_DOMAIN_VGA:
127 		return "VGA";
128 	case POWER_DOMAIN_AUDIO_MMIO:
129 		return "AUDIO_MMIO";
130 	case POWER_DOMAIN_AUDIO_PLAYBACK:
131 		return "AUDIO_PLAYBACK";
132 	case POWER_DOMAIN_AUX_IO_A:
133 		return "AUX_IO_A";
134 	case POWER_DOMAIN_AUX_IO_B:
135 		return "AUX_IO_B";
136 	case POWER_DOMAIN_AUX_IO_C:
137 		return "AUX_IO_C";
138 	case POWER_DOMAIN_AUX_IO_D:
139 		return "AUX_IO_D";
140 	case POWER_DOMAIN_AUX_IO_E:
141 		return "AUX_IO_E";
142 	case POWER_DOMAIN_AUX_IO_F:
143 		return "AUX_IO_F";
144 	case POWER_DOMAIN_AUX_A:
145 		return "AUX_A";
146 	case POWER_DOMAIN_AUX_B:
147 		return "AUX_B";
148 	case POWER_DOMAIN_AUX_C:
149 		return "AUX_C";
150 	case POWER_DOMAIN_AUX_D:
151 		return "AUX_D";
152 	case POWER_DOMAIN_AUX_E:
153 		return "AUX_E";
154 	case POWER_DOMAIN_AUX_F:
155 		return "AUX_F";
156 	case POWER_DOMAIN_AUX_USBC1:
157 		return "AUX_USBC1";
158 	case POWER_DOMAIN_AUX_USBC2:
159 		return "AUX_USBC2";
160 	case POWER_DOMAIN_AUX_USBC3:
161 		return "AUX_USBC3";
162 	case POWER_DOMAIN_AUX_USBC4:
163 		return "AUX_USBC4";
164 	case POWER_DOMAIN_AUX_USBC5:
165 		return "AUX_USBC5";
166 	case POWER_DOMAIN_AUX_USBC6:
167 		return "AUX_USBC6";
168 	case POWER_DOMAIN_AUX_TBT1:
169 		return "AUX_TBT1";
170 	case POWER_DOMAIN_AUX_TBT2:
171 		return "AUX_TBT2";
172 	case POWER_DOMAIN_AUX_TBT3:
173 		return "AUX_TBT3";
174 	case POWER_DOMAIN_AUX_TBT4:
175 		return "AUX_TBT4";
176 	case POWER_DOMAIN_AUX_TBT5:
177 		return "AUX_TBT5";
178 	case POWER_DOMAIN_AUX_TBT6:
179 		return "AUX_TBT6";
180 	case POWER_DOMAIN_GMBUS:
181 		return "GMBUS";
182 	case POWER_DOMAIN_INIT:
183 		return "INIT";
184 	case POWER_DOMAIN_MODESET:
185 		return "MODESET";
186 	case POWER_DOMAIN_GT_IRQ:
187 		return "GT_IRQ";
188 	case POWER_DOMAIN_DC_OFF:
189 		return "DC_OFF";
190 	case POWER_DOMAIN_TC_COLD_OFF:
191 		return "TC_COLD_OFF";
192 	default:
193 		MISSING_CASE(domain);
194 		return "?";
195 	}
196 }
197 
198 /**
199  * __intel_display_power_is_enabled - unlocked check for a power domain
200  * @dev_priv: i915 device instance
201  * @domain: power domain to check
202  *
203  * This is the unlocked version of intel_display_power_is_enabled() and should
204  * only be used from error capture and recovery code where deadlocks are
205  * possible.
206  *
207  * Returns:
208  * True when the power domain is enabled, false otherwise.
209  */
210 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
211 				      enum intel_display_power_domain domain)
212 {
213 	struct i915_power_well *power_well;
214 	bool is_enabled;
215 
216 	if (dev_priv->runtime_pm.suspended)
217 		return false;
218 
219 	is_enabled = true;
220 
221 	for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
222 		if (intel_power_well_is_always_on(power_well))
223 			continue;
224 
225 		if (!intel_power_well_is_enabled_cached(power_well)) {
226 			is_enabled = false;
227 			break;
228 		}
229 	}
230 
231 	return is_enabled;
232 }
233 
234 /**
235  * intel_display_power_is_enabled - check for a power domain
236  * @dev_priv: i915 device instance
237  * @domain: power domain to check
238  *
239  * This function can be used to check the hw power domain state. It is mostly
240  * used in hardware state readout functions. Everywhere else code should rely
241  * upon explicit power domain reference counting to ensure that the hardware
242  * block is powered up before accessing it.
243  *
244  * Callers must hold the relevant modesetting locks to ensure that concurrent
245  * threads can't disable the power well while the caller tries to read a few
246  * registers.
247  *
248  * Returns:
249  * True when the power domain is enabled, false otherwise.
250  */
251 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
252 				    enum intel_display_power_domain domain)
253 {
254 	struct i915_power_domains *power_domains;
255 	bool ret;
256 
257 	power_domains = &dev_priv->display.power.domains;
258 
259 	mutex_lock(&power_domains->lock);
260 	ret = __intel_display_power_is_enabled(dev_priv, domain);
261 	mutex_unlock(&power_domains->lock);
262 
263 	return ret;
264 }
265 
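/*
 * Degrade @target_dc_state to the next entry in states[] below if it is not
 * in the allowed DC mask; the list ends in DC_STATE_DISABLE, which is always
 * accepted.
 */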
266 static u32
267 sanitize_target_dc_state(struct drm_i915_private *dev_priv,
268 			 u32 target_dc_state)
269 {
270 	static const u32 states[] = {
271 		DC_STATE_EN_UPTO_DC6,
272 		DC_STATE_EN_UPTO_DC5,
273 		DC_STATE_EN_DC3CO,
274 		DC_STATE_DISABLE,
275 	};
276 	int i;
277 
278 	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
279 		if (target_dc_state != states[i])
280 			continue;
281 
282 		if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state)
283 			break;
284 
285 		target_dc_state = states[i + 1];
286 	}
287 
288 	return target_dc_state;
289 }
290 
291 /**
292  * intel_display_power_set_target_dc_state - Set target dc state.
293  * @dev_priv: i915 device
294  * @state: state which needs to be set as target_dc_state.
295  *
296  * This function sets the "DC off" power well's target_dc_state; based upon
297  * this target_dc_state, the "DC off" power well will enable the desired
298  * DC state.
299  */
300 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
301 					     u32 state)
302 {
303 	struct i915_power_well *power_well;
304 	bool dc_off_enabled;
305 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
306 
307 	mutex_lock(&power_domains->lock);
308 	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
309 
310 	if (drm_WARN_ON(&dev_priv->drm, !power_well))
311 		goto unlock;
312 
313 	state = sanitize_target_dc_state(dev_priv, state);
314 
315 	if (state == dev_priv->display.dmc.target_dc_state)
316 		goto unlock;
317 
318 	dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
319 	/*
320 	 * If the DC off power well is disabled, we need to enable and then
321 	 * disable it so that the new target DC state takes effect.
322 	 */
323 	if (!dc_off_enabled)
324 		intel_power_well_enable(dev_priv, power_well);
325 
326 	dev_priv->display.dmc.target_dc_state = state;
327 
328 	if (!dc_off_enabled)
329 		intel_power_well_disable(dev_priv, power_well);
330 
331 unlock:
332 	mutex_unlock(&power_domains->lock);
333 }
334 
335 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
336 
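/*
 * async_put_domains[0] tracks the domains whose references are being dropped
 * by the currently queued async-put work, async_put_domains[1] the domains
 * queued up while that work was still pending; the work handler releases [0],
 * then moves [1] over to [0] and requeues itself if needed.
 */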
337 static void __async_put_domains_mask(struct i915_power_domains *power_domains,
338 				     struct intel_power_domain_mask *mask)
339 {
340 	bitmap_or(mask->bits,
341 		  power_domains->async_put_domains[0].bits,
342 		  power_domains->async_put_domains[1].bits,
343 		  POWER_DOMAIN_NUM);
344 }
345 
346 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
347 
348 static bool
349 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
350 {
351 	struct drm_i915_private *i915 = container_of(power_domains,
352 						     struct drm_i915_private,
353 						     display.power.domains);
354 
355 	return !drm_WARN_ON(&i915->drm,
356 			    bitmap_intersects(power_domains->async_put_domains[0].bits,
357 					      power_domains->async_put_domains[1].bits,
358 					      POWER_DOMAIN_NUM));
359 }
360 
361 static bool
362 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
363 {
364 	struct drm_i915_private *i915 = container_of(power_domains,
365 						     struct drm_i915_private,
366 						     display.power.domains);
367 	struct intel_power_domain_mask async_put_mask;
368 	enum intel_display_power_domain domain;
369 	bool err = false;
370 
371 	err |= !assert_async_put_domain_masks_disjoint(power_domains);
372 	__async_put_domains_mask(power_domains, &async_put_mask);
373 	err |= drm_WARN_ON(&i915->drm,
374 			   !!power_domains->async_put_wakeref !=
375 			   !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
376 
377 	for_each_power_domain(domain, &async_put_mask)
378 		err |= drm_WARN_ON(&i915->drm,
379 				   power_domains->domain_use_count[domain] != 1);
380 
381 	return !err;
382 }
383 
384 static void print_power_domains(struct i915_power_domains *power_domains,
385 				const char *prefix, struct intel_power_domain_mask *mask)
386 {
387 	struct drm_i915_private *i915 = container_of(power_domains,
388 						     struct drm_i915_private,
389 						     display.power.domains);
390 	enum intel_display_power_domain domain;
391 
392 	drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
393 	for_each_power_domain(domain, mask)
394 		drm_dbg(&i915->drm, "%s use_count %d\n",
395 			intel_display_power_domain_str(domain),
396 			power_domains->domain_use_count[domain]);
397 }
398 
399 static void
400 print_async_put_domains_state(struct i915_power_domains *power_domains)
401 {
402 	struct drm_i915_private *i915 = container_of(power_domains,
403 						     struct drm_i915_private,
404 						     display.power.domains);
405 
406 	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
407 		power_domains->async_put_wakeref);
408 
409 	print_power_domains(power_domains, "async_put_domains[0]",
410 			    &power_domains->async_put_domains[0]);
411 	print_power_domains(power_domains, "async_put_domains[1]",
412 			    &power_domains->async_put_domains[1]);
413 }
414 
415 static void
416 verify_async_put_domains_state(struct i915_power_domains *power_domains)
417 {
418 	if (!__async_put_domains_state_ok(power_domains))
419 		print_async_put_domains_state(power_domains);
420 }
421 
422 #else
423 
424 static void
425 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
426 {
427 }
428 
429 static void
430 verify_async_put_domains_state(struct i915_power_domains *power_domains)
431 {
432 }
433 
434 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
435 
436 static void async_put_domains_mask(struct i915_power_domains *power_domains,
437 				   struct intel_power_domain_mask *mask)
438 
439 {
440 	assert_async_put_domain_masks_disjoint(power_domains);
441 
442 	__async_put_domains_mask(power_domains, mask);
443 }
444 
445 static void
446 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
447 			       enum intel_display_power_domain domain)
448 {
449 	assert_async_put_domain_masks_disjoint(power_domains);
450 
451 	clear_bit(domain, power_domains->async_put_domains[0].bits);
452 	clear_bit(domain, power_domains->async_put_domains[1].bits);
453 }
454 
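/*
 * Take over a pending async-put reference for @domain, if any, cancelling the
 * queued power-off for it. Returns true if such a pending reference was
 * grabbed, false otherwise.
 */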
455 static bool
456 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
457 				       enum intel_display_power_domain domain)
458 {
459 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
460 	struct intel_power_domain_mask async_put_mask;
461 	bool ret = false;
462 
463 	async_put_domains_mask(power_domains, &async_put_mask);
464 	if (!test_bit(domain, async_put_mask.bits))
465 		goto out_verify;
466 
467 	async_put_domains_clear_domain(power_domains, domain);
468 
469 	ret = true;
470 
471 	async_put_domains_mask(power_domains, &async_put_mask);
472 	if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
473 		goto out_verify;
474 
475 	cancel_delayed_work(&power_domains->async_put_work);
476 	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
477 				 fetch_and_zero(&power_domains->async_put_wakeref));
478 out_verify:
479 	verify_async_put_domains_state(power_domains);
480 
481 	return ret;
482 }
483 
484 static void
485 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
486 				 enum intel_display_power_domain domain)
487 {
488 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
489 	struct i915_power_well *power_well;
490 
491 	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
492 		return;
493 
494 	for_each_power_domain_well(dev_priv, power_well, domain)
495 		intel_power_well_get(dev_priv, power_well);
496 
497 	power_domains->domain_use_count[domain]++;
498 }
499 
500 /**
501  * intel_display_power_get - grab a power domain reference
502  * @dev_priv: i915 device instance
503  * @domain: power domain to reference
504  *
505  * This function grabs a power domain reference for @domain and ensures that the
506  * power domain and all its parents are powered up. Therefore users should only
507  * grab a reference to the innermost power domain they need.
508  *
509  * Any power domain reference obtained by this function must have a symmetric
510  * call to intel_display_power_put() to release the reference again.
511  */
512 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
513 					enum intel_display_power_domain domain)
514 {
515 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
516 	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
517 
518 	mutex_lock(&power_domains->lock);
519 	__intel_display_power_get_domain(dev_priv, domain);
520 	mutex_unlock(&power_domains->lock);
521 
522 	return wakeref;
523 }
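
/*
 * Illustrative usage (not taken from this file): a caller brackets its HW
 * access with a matching get/put pair, e.g.
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_PIPE_A);
 *	... program pipe A registers ...
 *	intel_display_power_put(i915, POWER_DOMAIN_PIPE_A, wakeref);
 */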
524 
525 /**
526  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
527  * @dev_priv: i915 device instance
528  * @domain: power domain to reference
529  *
530  * This function grabs a power domain reference for @domain only if the power
531  * domain is currently enabled, and in that case ensures that it and all its
532  * parents stay powered up. It returns 0 if the power domain was not enabled.
533  *
534  * Any power domain reference obtained by this function must have a symmetric
535  * call to intel_display_power_put() to release the reference again.
536  */
537 intel_wakeref_t
538 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
539 				   enum intel_display_power_domain domain)
540 {
541 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
542 	intel_wakeref_t wakeref;
543 	bool is_enabled;
544 
545 	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
546 	if (!wakeref)
547 		return false;
548 
549 	mutex_lock(&power_domains->lock);
550 
551 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
552 		__intel_display_power_get_domain(dev_priv, domain);
553 		is_enabled = true;
554 	} else {
555 		is_enabled = false;
556 	}
557 
558 	mutex_unlock(&power_domains->lock);
559 
560 	if (!is_enabled) {
561 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
562 		wakeref = 0;
563 	}
564 
565 	return wakeref;
566 }
567 
568 static void
569 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
570 				 enum intel_display_power_domain domain)
571 {
572 	struct i915_power_domains *power_domains;
573 	struct i915_power_well *power_well;
574 	const char *name = intel_display_power_domain_str(domain);
575 	struct intel_power_domain_mask async_put_mask;
576 
577 	power_domains = &dev_priv->display.power.domains;
578 
579 	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
580 		 "Use count on domain %s is already zero\n",
581 		 name);
582 	async_put_domains_mask(power_domains, &async_put_mask);
583 	drm_WARN(&dev_priv->drm,
584 		 test_bit(domain, async_put_mask.bits),
585 		 "Async disabling of domain %s is pending\n",
586 		 name);
587 
588 	power_domains->domain_use_count[domain]--;
589 
590 	for_each_power_domain_well_reverse(dev_priv, power_well, domain)
591 		intel_power_well_put(dev_priv, power_well);
592 }
593 
594 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
595 				      enum intel_display_power_domain domain)
596 {
597 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
598 
599 	mutex_lock(&power_domains->lock);
600 	__intel_display_power_put_domain(dev_priv, domain);
601 	mutex_unlock(&power_domains->lock);
602 }
603 
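/*
 * Queue the async-put work with a 100 ms delay, handing @wakeref (a raw
 * wakeref) over to it; the work handler releases the wakeref once the pending
 * domain references have been dropped.
 */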
604 static void
605 queue_async_put_domains_work(struct i915_power_domains *power_domains,
606 			     intel_wakeref_t wakeref)
607 {
608 	struct drm_i915_private *i915 = container_of(power_domains,
609 						     struct drm_i915_private,
610 						     display.power.domains);
611 	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
612 	power_domains->async_put_wakeref = wakeref;
613 	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
614 						    &power_domains->async_put_work,
615 						    msecs_to_jiffies(100)));
616 }
617 
618 static void
619 release_async_put_domains(struct i915_power_domains *power_domains,
620 			  struct intel_power_domain_mask *mask)
621 {
622 	struct drm_i915_private *dev_priv =
623 		container_of(power_domains, struct drm_i915_private,
624 			     display.power.domains);
625 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
626 	enum intel_display_power_domain domain;
627 	intel_wakeref_t wakeref;
628 
629 	/*
630 	 * The caller must already hold a raw wakeref; upgrade that to a proper
631 	 * wakeref to make the state checker happy about the HW access during
632 	 * power well disabling.
633 	 */
634 	assert_rpm_raw_wakeref_held(rpm);
635 	wakeref = intel_runtime_pm_get(rpm);
636 
637 	for_each_power_domain(domain, mask) {
638 		/* Clear before put, so put's sanity check is happy. */
639 		async_put_domains_clear_domain(power_domains, domain);
640 		__intel_display_power_put_domain(dev_priv, domain);
641 	}
642 
643 	intel_runtime_pm_put(rpm, wakeref);
644 }
645 
646 static void
647 intel_display_power_put_async_work(struct work_struct *work)
648 {
649 	struct drm_i915_private *dev_priv =
650 		container_of(work, struct drm_i915_private,
651 			     display.power.domains.async_put_work.work);
652 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
653 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
654 	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
655 	intel_wakeref_t old_work_wakeref = 0;
656 
657 	mutex_lock(&power_domains->lock);
658 
659 	/*
660 	 * Bail out if all the domain refs pending to be released were grabbed
661 	 * by subsequent gets or a flush_work.
662 	 */
663 	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
664 	if (!old_work_wakeref)
665 		goto out_verify;
666 
667 	release_async_put_domains(power_domains,
668 				  &power_domains->async_put_domains[0]);
669 
670 	/* Requeue the work if more domains were async put meanwhile. */
671 	if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
672 		bitmap_copy(power_domains->async_put_domains[0].bits,
673 			    power_domains->async_put_domains[1].bits,
674 			    POWER_DOMAIN_NUM);
675 		bitmap_zero(power_domains->async_put_domains[1].bits,
676 			    POWER_DOMAIN_NUM);
677 		queue_async_put_domains_work(power_domains,
678 					     fetch_and_zero(&new_work_wakeref));
679 	} else {
680 		/*
681 		 * Cancel the work that got queued after this one got dequeued,
682 		 * since here we released the corresponding async-put reference.
683 		 */
684 		cancel_delayed_work(&power_domains->async_put_work);
685 	}
686 
687 out_verify:
688 	verify_async_put_domains_state(power_domains);
689 
690 	mutex_unlock(&power_domains->lock);
691 
692 	if (old_work_wakeref)
693 		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
694 	if (new_work_wakeref)
695 		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
696 }
697 
698 /**
699  * intel_display_power_put_async - release a power domain reference asynchronously
700  * @i915: i915 device instance
701  * @domain: power domain to put the reference for
702  * @wakeref: wakeref acquired for the reference that is being released
703  *
704  * This function drops the power domain reference obtained by
705  * intel_display_power_get*() and schedules a work to power down the
706  * corresponding hardware block if this is the last reference.
707  */
708 void __intel_display_power_put_async(struct drm_i915_private *i915,
709 				     enum intel_display_power_domain domain,
710 				     intel_wakeref_t wakeref)
711 {
712 	struct i915_power_domains *power_domains = &i915->display.power.domains;
713 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
714 	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
715 
716 	mutex_lock(&power_domains->lock);
717 
718 	if (power_domains->domain_use_count[domain] > 1) {
719 		__intel_display_power_put_domain(i915, domain);
720 
721 		goto out_verify;
722 	}
723 
724 	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
725 
726 	/* Let a pending work requeue itself or queue a new one. */
727 	if (power_domains->async_put_wakeref) {
728 		set_bit(domain, power_domains->async_put_domains[1].bits);
729 	} else {
730 		set_bit(domain, power_domains->async_put_domains[0].bits);
731 		queue_async_put_domains_work(power_domains,
732 					     fetch_and_zero(&work_wakeref));
733 	}
734 
735 out_verify:
736 	verify_async_put_domains_state(power_domains);
737 
738 	mutex_unlock(&power_domains->lock);
739 
740 	if (work_wakeref)
741 		intel_runtime_pm_put_raw(rpm, work_wakeref);
742 
743 	intel_runtime_pm_put(rpm, wakeref);
744 }
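
/*
 * Illustrative async-put usage (hypothetical caller, not from this file):
 *
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 *	...
 *	intel_display_power_flush_work(i915);
 *
 * where the flush forces completion of the delayed power-off if needed.
 */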
745 
746 /**
747  * intel_display_power_flush_work - flushes the async display power disabling work
748  * @i915: i915 device instance
749  *
750  * Flushes any pending work that was scheduled by a preceding
751  * intel_display_power_put_async() call, completing the disabling of the
752  * corresponding power domains.
753  *
754  * Note that the work handler function may still be running after this
755  * function returns; to ensure that the work handler isn't running use
756  * intel_display_power_flush_work_sync() instead.
757  */
758 void intel_display_power_flush_work(struct drm_i915_private *i915)
759 {
760 	struct i915_power_domains *power_domains = &i915->display.power.domains;
761 	struct intel_power_domain_mask async_put_mask;
762 	intel_wakeref_t work_wakeref;
763 
764 	mutex_lock(&power_domains->lock);
765 
766 	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
767 	if (!work_wakeref)
768 		goto out_verify;
769 
770 	async_put_domains_mask(power_domains, &async_put_mask);
771 	release_async_put_domains(power_domains, &async_put_mask);
772 	cancel_delayed_work(&power_domains->async_put_work);
773 
774 out_verify:
775 	verify_async_put_domains_state(power_domains);
776 
777 	mutex_unlock(&power_domains->lock);
778 
779 	if (work_wakeref)
780 		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
781 }
782 
783 /**
784  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
785  * @i915: i915 device instance
786  *
787  * Like intel_display_power_flush_work(), but also ensure that the work
788  * handler function is not running any more when this function returns.
789  */
790 static void
791 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
792 {
793 	struct i915_power_domains *power_domains = &i915->display.power.domains;
794 
795 	intel_display_power_flush_work(i915);
796 	cancel_delayed_work_sync(&power_domains->async_put_work);
797 
798 	verify_async_put_domains_state(power_domains);
799 
800 	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
801 }
802 
803 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
804 /**
805  * intel_display_power_put - release a power domain reference
806  * @dev_priv: i915 device instance
807  * @domain: power domain to put the reference for
808  * @wakeref: wakeref acquired for the reference that is being released
809  *
810  * This function drops the power domain reference obtained by
811  * intel_display_power_get() and might power down the corresponding hardware
812  * block right away if this is the last reference.
813  */
814 void intel_display_power_put(struct drm_i915_private *dev_priv,
815 			     enum intel_display_power_domain domain,
816 			     intel_wakeref_t wakeref)
817 {
818 	__intel_display_power_put(dev_priv, domain);
819 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
820 }
821 #else
822 /**
823  * intel_display_power_put_unchecked - release an unchecked power domain reference
824  * @dev_priv: i915 device instance
825  * @domain: power domain to put the reference for
826  *
827  * This function drops the power domain reference obtained by
828  * intel_display_power_get() and might power down the corresponding hardware
829  * block right away if this is the last reference.
830  *
831  * This function is only for the power domain code's internal use to suppress wakeref
832  * tracking when the corresponding debug kconfig option is disabled, and should
833  * not be used otherwise.
834  */
835 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
836 				       enum intel_display_power_domain domain)
837 {
838 	__intel_display_power_put(dev_priv, domain);
839 	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
840 }
841 #endif
842 
843 void
844 intel_display_power_get_in_set(struct drm_i915_private *i915,
845 			       struct intel_display_power_domain_set *power_domain_set,
846 			       enum intel_display_power_domain domain)
847 {
848 	intel_wakeref_t __maybe_unused wf;
849 
850 	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
851 
852 	wf = intel_display_power_get(i915, domain);
853 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
854 	power_domain_set->wakerefs[domain] = wf;
855 #endif
856 	set_bit(domain, power_domain_set->mask.bits);
857 }
858 
859 bool
860 intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
861 					  struct intel_display_power_domain_set *power_domain_set,
862 					  enum intel_display_power_domain domain)
863 {
864 	intel_wakeref_t wf;
865 
866 	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
867 
868 	wf = intel_display_power_get_if_enabled(i915, domain);
869 	if (!wf)
870 		return false;
871 
872 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
873 	power_domain_set->wakerefs[domain] = wf;
874 #endif
875 	set_bit(domain, power_domain_set->mask.bits);
876 
877 	return true;
878 }
879 
880 void
881 intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
882 				    struct intel_display_power_domain_set *power_domain_set,
883 				    struct intel_power_domain_mask *mask)
884 {
885 	enum intel_display_power_domain domain;
886 
887 	drm_WARN_ON(&i915->drm,
888 		    !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
889 
890 	for_each_power_domain(domain, mask) {
891 		intel_wakeref_t __maybe_unused wf = -1;
892 
893 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
894 		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
895 #endif
896 		intel_display_power_put(i915, domain, wf);
897 		clear_bit(domain, power_domain_set->mask.bits);
898 	}
899 }
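
/*
 * Illustrative use of the power domain set helpers (hypothetical caller):
 *
 *	struct intel_display_power_domain_set set = {};
 *
 *	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_PIPE_A);
 *	...
 *	intel_display_power_put_mask_in_set(i915, &set, &set.mask);
 */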
900 
901 static int
902 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
903 				   int disable_power_well)
904 {
905 	if (disable_power_well >= 0)
906 		return !!disable_power_well;
907 
908 	return 1;
909 }
910 
911 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
912 			       int enable_dc)
913 {
914 	u32 mask;
915 	int requested_dc;
916 	int max_dc;
917 
918 	if (!HAS_DISPLAY(dev_priv))
919 		return 0;
920 
921 	if (IS_DG2(dev_priv))
922 		max_dc = 1;
923 	else if (IS_DG1(dev_priv))
924 		max_dc = 3;
925 	else if (DISPLAY_VER(dev_priv) >= 12)
926 		max_dc = 4;
927 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
928 		max_dc = 1;
929 	else if (DISPLAY_VER(dev_priv) >= 9)
930 		max_dc = 2;
931 	else
932 		max_dc = 0;
933 
934 	/*
935 	 * DC9 has a separate HW flow from the rest of the DC states,
936 	 * not depending on the DMC firmware. It's needed by system
937 	 * suspend/resume, so allow it unconditionally.
938 	 */
939 	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
940 		DISPLAY_VER(dev_priv) >= 11 ?
941 	       DC_STATE_EN_DC9 : 0;
942 
943 	if (!dev_priv->params.disable_power_well)
944 		max_dc = 0;
945 
946 	if (enable_dc >= 0 && enable_dc <= max_dc) {
947 		requested_dc = enable_dc;
948 	} else if (enable_dc == -1) {
949 		requested_dc = max_dc;
950 	} else if (enable_dc > max_dc && enable_dc <= 4) {
951 		drm_dbg_kms(&dev_priv->drm,
952 			    "Adjusting requested max DC state (%d->%d)\n",
953 			    enable_dc, max_dc);
954 		requested_dc = max_dc;
955 	} else {
956 		drm_err(&dev_priv->drm,
957 			"Unexpected value for enable_dc (%d)\n", enable_dc);
958 		requested_dc = max_dc;
959 	}
960 
961 	switch (requested_dc) {
962 	case 4:
963 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
964 		break;
965 	case 3:
966 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
967 		break;
968 	case 2:
969 		mask |= DC_STATE_EN_UPTO_DC6;
970 		break;
971 	case 1:
972 		mask |= DC_STATE_EN_UPTO_DC5;
973 		break;
974 	}
975 
976 	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
977 
978 	return mask;
979 }
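
/*
 * Worked example (illustrative): on a DISPLAY_VER() == 12 platform that is
 * neither DG1 nor DG2, with power wells enabled and enable_dc == -1, max_dc
 * is 4 and the resulting mask is DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO |
 * DC_STATE_EN_UPTO_DC6.
 */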
980 
981 /**
982  * intel_power_domains_init - initializes the power domain structures
983  * @dev_priv: i915 device instance
984  *
985  * Initializes the power domain structures for @dev_priv depending upon the
986  * supported platform.
987  */
988 int intel_power_domains_init(struct drm_i915_private *dev_priv)
989 {
990 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
991 
992 	dev_priv->params.disable_power_well =
993 		sanitize_disable_power_well_option(dev_priv,
994 						   dev_priv->params.disable_power_well);
995 	dev_priv->display.dmc.allowed_dc_mask =
996 		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
997 
998 	dev_priv->display.dmc.target_dc_state =
999 		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1000 
1001 	mutex_init(&power_domains->lock);
1002 
1003 	INIT_DELAYED_WORK(&power_domains->async_put_work,
1004 			  intel_display_power_put_async_work);
1005 
1006 	return intel_display_power_map_init(power_domains);
1007 }
1008 
1009 /**
1010  * intel_power_domains_cleanup - clean up power domains resources
1011  * @dev_priv: i915 device instance
1012  *
1013  * Release any resources acquired by intel_power_domains_init()
1014  */
1015 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
1016 {
1017 	intel_display_power_map_cleanup(&dev_priv->display.power.domains);
1018 }
1019 
1020 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
1021 {
1022 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1023 	struct i915_power_well *power_well;
1024 
1025 	mutex_lock(&power_domains->lock);
1026 	for_each_power_well(dev_priv, power_well)
1027 		intel_power_well_sync_hw(dev_priv, power_well);
1028 	mutex_unlock(&power_domains->lock);
1029 }
1030 
1031 static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
1032 				enum dbuf_slice slice, bool enable)
1033 {
1034 	i915_reg_t reg = DBUF_CTL_S(slice);
1035 	bool state;
1036 
1037 	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
1038 		     enable ? DBUF_POWER_REQUEST : 0);
1039 	intel_de_posting_read(dev_priv, reg);
1040 	udelay(10);
1041 
1042 	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
1043 	drm_WARN(&dev_priv->drm, enable != state,
1044 		 "DBuf slice %d power %s timeout!\n",
1045 		 slice, str_enable_disable(enable));
1046 }
1047 
1048 void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
1049 			     u8 req_slices)
1050 {
1051 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1052 	u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
1053 	enum dbuf_slice slice;
1054 
1055 	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
1056 		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
1057 		 req_slices, slice_mask);
1058 
1059 	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
1060 		    req_slices);
1061 
1062 	/*
1063 	 * This might run in parallel with gen9_dc_off_power_well_enable being
1064 	 * called from intel_dp_detect, for instance, which would trigger an
1065 	 * assertion due to a race: gen9_assert_dbuf_enabled might preempt this
1066 	 * when the registers were already updated, while the software state in
1067 	 * dev_priv was not.
1068 	 */
1069 	mutex_lock(&power_domains->lock);
1070 
1071 	for_each_dbuf_slice(dev_priv, slice)
1072 		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
1073 
1074 	dev_priv->display.dbuf.enabled_slices = req_slices;
1075 
1076 	mutex_unlock(&power_domains->lock);
1077 }
1078 
1079 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
1080 {
1081 	dev_priv->display.dbuf.enabled_slices =
1082 		intel_enabled_dbuf_slices_mask(dev_priv);
1083 
1084 	/*
1085 	 * Just power up at least one slice; we will figure out later
1086 	 * which slices we have and which we need.
1087 	 */
1088 	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
1089 				dev_priv->display.dbuf.enabled_slices);
1090 }
1091 
1092 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
1093 {
1094 	gen9_dbuf_slices_update(dev_priv, 0);
1095 }
1096 
1097 static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
1098 {
1099 	enum dbuf_slice slice;
1100 
1101 	if (IS_ALDERLAKE_P(dev_priv))
1102 		return;
1103 
1104 	for_each_dbuf_slice(dev_priv, slice)
1105 		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
1106 			     DBUF_TRACKER_STATE_SERVICE_MASK,
1107 			     DBUF_TRACKER_STATE_SERVICE(8));
1108 }
1109 
1110 static void icl_mbus_init(struct drm_i915_private *dev_priv)
1111 {
1112 	unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
1113 	u32 mask, val, i;
1114 
1115 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1116 		return;
1117 
1118 	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
1119 		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
1120 		MBUS_ABOX_B_CREDIT_MASK |
1121 		MBUS_ABOX_BW_CREDIT_MASK;
1122 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
1123 		MBUS_ABOX_BT_CREDIT_POOL2(16) |
1124 		MBUS_ABOX_B_CREDIT(1) |
1125 		MBUS_ABOX_BW_CREDIT(1);
1126 
1127 	/*
1128 	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
1129 	 * expect us to program the abox_ctl0 register as well, even though
1130 	 * we don't have to program other instance-0 registers like BW_BUDDY.
1131 	 */
1132 	if (DISPLAY_VER(dev_priv) == 12)
1133 		abox_regs |= BIT(0);
1134 
1135 	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
1136 		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
1137 }
1138 
1139 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
1140 {
1141 	u32 val = intel_de_read(dev_priv, LCPLL_CTL);
1142 
1143 	/*
1144 	 * The LCPLL register should be turned on by the BIOS. For now
1145 	 * let's just check its state and print errors in case
1146 	 * something is wrong.  Don't even try to turn it on.
1147 	 */
1148 
1149 	if (val & LCPLL_CD_SOURCE_FCLK)
1150 		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
1151 
1152 	if (val & LCPLL_PLL_DISABLE)
1153 		drm_err(&dev_priv->drm, "LCPLL is disabled\n");
1154 
1155 	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
1156 		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
1157 }
1158 
1159 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
1160 {
1161 	struct intel_crtc *crtc;
1162 
1163 	for_each_intel_crtc(&dev_priv->drm, crtc)
1164 		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
1165 				pipe_name(crtc->pipe));
1166 
1167 	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
1168 			"Display power well on\n");
1169 	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
1170 			"SPLL enabled\n");
1171 	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
1172 			"WRPLL1 enabled\n");
1173 	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
1174 			"WRPLL2 enabled\n");
1175 	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
1176 			"Panel power on\n");
1177 	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
1178 			"CPU PWM1 enabled\n");
1179 	if (IS_HASWELL(dev_priv))
1180 		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
1181 				"CPU PWM2 enabled\n");
1182 	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
1183 			"PCH PWM1 enabled\n");
1184 	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1185 			"Utility pin enabled\n");
1186 	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
1187 			"PCH GTC enabled\n");
1188 
1189 	/*
1190 	 * In theory we can still leave IRQs enabled, as long as only the HPD
1191 	 * interrupts remain enabled. We used to check for that, but since it's
1192 	 * gen-specific and since we only disable LCPLL after we fully disable
1193 	 * the interrupts, the check below should be enough.
1194 	 */
1195 	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
1196 }
1197 
1198 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
1199 {
1200 	if (IS_HASWELL(dev_priv))
1201 		return intel_de_read(dev_priv, D_COMP_HSW);
1202 	else
1203 		return intel_de_read(dev_priv, D_COMP_BDW);
1204 }
1205 
1206 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
1207 {
1208 	if (IS_HASWELL(dev_priv)) {
1209 		if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
1210 			drm_dbg_kms(&dev_priv->drm,
1211 				    "Failed to write to D_COMP\n");
1212 	} else {
1213 		intel_de_write(dev_priv, D_COMP_BDW, val);
1214 		intel_de_posting_read(dev_priv, D_COMP_BDW);
1215 	}
1216 }
1217 
1218 /*
1219  * This function implements pieces of two sequences from BSpec:
1220  * - Sequence for display software to disable LCPLL
1221  * - Sequence for display software to allow package C8+
1222  * The steps implemented here are just the steps that actually touch the LCPLL
1223  * register. Callers should take care of disabling all the display engine
1224  * functions, doing the mode unset, fixing interrupts, etc.
1225  */
1226 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
1227 			      bool switch_to_fclk, bool allow_power_down)
1228 {
1229 	u32 val;
1230 
1231 	assert_can_disable_lcpll(dev_priv);
1232 
1233 	val = intel_de_read(dev_priv, LCPLL_CTL);
1234 
1235 	if (switch_to_fclk) {
1236 		val |= LCPLL_CD_SOURCE_FCLK;
1237 		intel_de_write(dev_priv, LCPLL_CTL, val);
1238 
1239 		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
1240 				LCPLL_CD_SOURCE_FCLK_DONE, 1))
1241 			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
1242 
1243 		val = intel_de_read(dev_priv, LCPLL_CTL);
1244 	}
1245 
1246 	val |= LCPLL_PLL_DISABLE;
1247 	intel_de_write(dev_priv, LCPLL_CTL, val);
1248 	intel_de_posting_read(dev_priv, LCPLL_CTL);
1249 
1250 	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
1251 		drm_err(&dev_priv->drm, "LCPLL still locked\n");
1252 
1253 	val = hsw_read_dcomp(dev_priv);
1254 	val |= D_COMP_COMP_DISABLE;
1255 	hsw_write_dcomp(dev_priv, val);
1256 	ndelay(100);
1257 
1258 	if (wait_for((hsw_read_dcomp(dev_priv) &
1259 		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
1260 		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
1261 
1262 	if (allow_power_down) {
1263 		val = intel_de_read(dev_priv, LCPLL_CTL);
1264 		val |= LCPLL_POWER_DOWN_ALLOW;
1265 		intel_de_write(dev_priv, LCPLL_CTL, val);
1266 		intel_de_posting_read(dev_priv, LCPLL_CTL);
1267 	}
1268 }
1269 
1270 /*
1271  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
1272  * source.
1273  */
1274 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
1275 {
1276 	u32 val;
1277 
1278 	val = intel_de_read(dev_priv, LCPLL_CTL);
1279 
1280 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
1281 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
1282 		return;
1283 
1284 	/*
1285 	 * Make sure we're not in PC8 state before disabling PC8, otherwise
1286 	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
1287 	 */
1288 	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1289 
1290 	if (val & LCPLL_POWER_DOWN_ALLOW) {
1291 		val &= ~LCPLL_POWER_DOWN_ALLOW;
1292 		intel_de_write(dev_priv, LCPLL_CTL, val);
1293 		intel_de_posting_read(dev_priv, LCPLL_CTL);
1294 	}
1295 
1296 	val = hsw_read_dcomp(dev_priv);
1297 	val |= D_COMP_COMP_FORCE;
1298 	val &= ~D_COMP_COMP_DISABLE;
1299 	hsw_write_dcomp(dev_priv, val);
1300 
1301 	val = intel_de_read(dev_priv, LCPLL_CTL);
1302 	val &= ~LCPLL_PLL_DISABLE;
1303 	intel_de_write(dev_priv, LCPLL_CTL, val);
1304 
1305 	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
1306 		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
1307 
1308 	if (val & LCPLL_CD_SOURCE_FCLK) {
1309 		val = intel_de_read(dev_priv, LCPLL_CTL);
1310 		val &= ~LCPLL_CD_SOURCE_FCLK;
1311 		intel_de_write(dev_priv, LCPLL_CTL, val);
1312 
1313 		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
1314 				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
1315 			drm_err(&dev_priv->drm,
1316 				"Switching back to LCPLL failed\n");
1317 	}
1318 
1319 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1320 
1321 	intel_update_cdclk(dev_priv);
1322 	intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
1323 }
1324 
1325 /*
1326  * Package states C8 and deeper are really deep PC states that can only be
1327  * reached when all the devices on the system allow it, so even if the graphics
1328  * device allows PC8+, it doesn't mean the system will actually get to these
1329  * states. Our driver only allows PC8+ when going into runtime PM.
1330  *
1331  * The requirements for PC8+ are that all the outputs are disabled, the power
1332  * well is disabled and most interrupts are disabled, and these are also
1333  * requirements for runtime PM. When these conditions are met, we manually do
1334  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
1335  * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
1336  * hang the machine.
1337  *
1338  * When we really reach PC8 or deeper states (not just when we allow it) we lose
1339  * the state of some registers, so when we come back from PC8+ we need to
1340  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
1341  * need to take care of the registers kept by RC6. Notice that this happens even
1342  * if we don't put the device in PCI D3 state (which is what currently happens
1343  * because of the runtime PM support).
1344  *
1345  * For more, read "Display Sequences for Package C8" on the hardware
1346  * documentation.
1347  */
1348 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
1349 {
1350 	u32 val;
1351 
1352 	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
1353 
1354 	if (HAS_PCH_LPT_LP(dev_priv)) {
1355 		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
1356 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
1357 		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
1358 	}
1359 
1360 	lpt_disable_clkout_dp(dev_priv);
1361 	hsw_disable_lcpll(dev_priv, true, true);
1362 }
1363 
1364 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
1365 {
1366 	u32 val;
1367 
1368 	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
1369 
1370 	hsw_restore_lcpll(dev_priv);
1371 	intel_init_pch_refclk(dev_priv);
1372 
1373 	if (HAS_PCH_LPT_LP(dev_priv)) {
1374 		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
1375 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
1376 		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
1377 	}
1378 }
1379 
1380 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
1381 				      bool enable)
1382 {
1383 	i915_reg_t reg;
1384 	u32 reset_bits, val;
1385 
1386 	if (IS_IVYBRIDGE(dev_priv)) {
1387 		reg = GEN7_MSG_CTL;
1388 		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
1389 	} else {
1390 		reg = HSW_NDE_RSTWRN_OPT;
1391 		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
1392 	}
1393 
1394 	if (DISPLAY_VER(dev_priv) >= 14)
1395 		reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
1396 
1397 	val = intel_de_read(dev_priv, reg);
1398 
1399 	if (enable)
1400 		val |= reset_bits;
1401 	else
1402 		val &= ~reset_bits;
1403 
1404 	intel_de_write(dev_priv, reg, val);
1405 }
1406 
1407 static void skl_display_core_init(struct drm_i915_private *dev_priv,
1408 				  bool resume)
1409 {
1410 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1411 	struct i915_power_well *well;
1412 
1413 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1414 
1415 	/* enable PCH reset handshake */
1416 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
1417 
1418 	if (!HAS_DISPLAY(dev_priv))
1419 		return;
1420 
1421 	/* enable PG1 and Misc I/O */
1422 	mutex_lock(&power_domains->lock);
1423 
1424 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1425 	intel_power_well_enable(dev_priv, well);
1426 
1427 	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
1428 	intel_power_well_enable(dev_priv, well);
1429 
1430 	mutex_unlock(&power_domains->lock);
1431 
1432 	intel_cdclk_init_hw(dev_priv);
1433 
1434 	gen9_dbuf_enable(dev_priv);
1435 
1436 	if (resume)
1437 		intel_dmc_load_program(dev_priv);
1438 }
1439 
1440 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
1441 {
1442 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1443 	struct i915_power_well *well;
1444 
1445 	if (!HAS_DISPLAY(dev_priv))
1446 		return;
1447 
1448 	gen9_disable_dc_states(dev_priv);
1449 	/* TODO: disable DMC program */
1450 
1451 	gen9_dbuf_disable(dev_priv);
1452 
1453 	intel_cdclk_uninit_hw(dev_priv);
1454 
1455 	/* The spec doesn't call for removing the reset handshake flag */
1456 	/* disable PG1 and Misc I/O */
1457 
1458 	mutex_lock(&power_domains->lock);
1459 
1460 	/*
1461 	 * BSpec says to keep the MISC IO power well enabled here, only
1462 	 * remove our request for power well 1.
1463 	 * Note that even though the driver's request is removed, power well 1
1464 	 * may stay enabled after this due to DMC's own request on it.
1465 	 */
1466 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1467 	intel_power_well_disable(dev_priv, well);
1468 
1469 	mutex_unlock(&power_domains->lock);
1470 
1471 	usleep_range(10, 30);		/* 10 us delay per Bspec */
1472 }
1473 
1474 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
1475 {
1476 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1477 	struct i915_power_well *well;
1478 
1479 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1480 
1481 	/*
1482 	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
1483 	 * or else the reset will hang because there is no PCH to respond.
1484 	 * Move the handshake programming to the initialization sequence;
1485 	 * previously it was left up to the BIOS.
1486 	 */
1487 	intel_pch_reset_handshake(dev_priv, false);
1488 
1489 	if (!HAS_DISPLAY(dev_priv))
1490 		return;
1491 
1492 	/* Enable PG1 */
1493 	mutex_lock(&power_domains->lock);
1494 
1495 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1496 	intel_power_well_enable(dev_priv, well);
1497 
1498 	mutex_unlock(&power_domains->lock);
1499 
1500 	intel_cdclk_init_hw(dev_priv);
1501 
1502 	gen9_dbuf_enable(dev_priv);
1503 
1504 	if (resume)
1505 		intel_dmc_load_program(dev_priv);
1506 }
1507 
1508 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
1509 {
1510 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1511 	struct i915_power_well *well;
1512 
1513 	if (!HAS_DISPLAY(dev_priv))
1514 		return;
1515 
1516 	gen9_disable_dc_states(dev_priv);
1517 	/* TODO: disable DMC program */
1518 
1519 	gen9_dbuf_disable(dev_priv);
1520 
1521 	intel_cdclk_uninit_hw(dev_priv);
1522 
1523 	/* The spec doesn't call for removing the reset handshake flag */
1524 
1525 	/*
1526 	 * Disable PW1 (PG1).
1527 	 * Note that even though the driver's request is removed, power well 1
1528 	 * may stay enabled after this due to DMC's own request on it.
1529 	 */
1530 	mutex_lock(&power_domains->lock);
1531 
1532 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1533 	intel_power_well_disable(dev_priv, well);
1534 
1535 	mutex_unlock(&power_domains->lock);
1536 
1537 	usleep_range(10, 30);		/* 10 us delay per Bspec */
1538 }
1539 
1540 struct buddy_page_mask {
1541 	u32 page_mask;
1542 	u8 type;
1543 	u8 num_channels;
1544 };
1545 
1546 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
1547 	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
1548 	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0xF },
1549 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
1550 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
1551 	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
1552 	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
1553 	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
1554 	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
1555 	{}
1556 };
1557 
1558 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
1559 	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
1560 	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
1561 	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
1562 	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
1563 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
1564 	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
1565 	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
1566 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
1567 	{}
1568 };
1569 
1570 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
1571 {
1572 	enum intel_dram_type type = dev_priv->dram_info.type;
1573 	u8 num_channels = dev_priv->dram_info.num_channels;
1574 	const struct buddy_page_mask *table;
1575 	unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
1576 	int config, i;
1577 
1578 	/* BW_BUDDY registers are not used on dGPUs beyond DG1 */
1579 	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
1580 		return;
1581 
1582 	if (IS_ALDERLAKE_S(dev_priv) ||
1583 	    IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1584 	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1585 	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
1586 		/* Wa_1409767108:tgl,dg1,adl-s */
1587 		table = wa_1409767108_buddy_page_masks;
1588 	else
1589 		table = tgl_buddy_page_masks;
1590 
1591 	for (config = 0; table[config].page_mask != 0; config++)
1592 		if (table[config].num_channels == num_channels &&
1593 		    table[config].type == type)
1594 			break;
1595 
1596 	if (table[config].page_mask == 0) {
1597 		drm_dbg(&dev_priv->drm,
1598 			"Unknown memory configuration; disabling address buddy logic.\n");
1599 		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
1600 			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
1601 				       BW_BUDDY_DISABLE);
1602 	} else {
1603 		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
1604 			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
1605 				       table[config].page_mask);
1606 
1607 			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
1608 			if (DISPLAY_VER(dev_priv) == 12)
1609 				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
1610 					     BW_BUDDY_TLB_REQ_TIMER_MASK,
1611 					     BW_BUDDY_TLB_REQ_TIMER(0x8));
1612 		}
1613 	}
1614 }
1615 
1616 static void icl_display_core_init(struct drm_i915_private *dev_priv,
1617 				  bool resume)
1618 {
1619 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1620 	struct i915_power_well *well;
1621 	u32 val;
1622 
1623 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1624 
1625 	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
1626 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
1627 	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
1628 		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
1629 			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
1630 
1631 	/* 1. Enable PCH reset handshake. */
1632 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
1633 
1634 	if (!HAS_DISPLAY(dev_priv))
1635 		return;
1636 
1637 	/* 2. Initialize all combo phys */
1638 	intel_combo_phy_init(dev_priv);
1639 
1640 	/*
1641 	 * 3. Enable Power Well 1 (PG1).
1642 	 *    The AUX IO power wells will be enabled on demand.
1643 	 */
1644 	mutex_lock(&power_domains->lock);
1645 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1646 	intel_power_well_enable(dev_priv, well);
1647 	mutex_unlock(&power_domains->lock);
1648 
1649 	/* 4. Enable CDCLK. */
1650 	intel_cdclk_init_hw(dev_priv);
1651 
1652 	if (DISPLAY_VER(dev_priv) >= 12)
1653 		gen12_dbuf_slices_config(dev_priv);
1654 
1655 	/* 5. Enable DBUF. */
1656 	gen9_dbuf_enable(dev_priv);
1657 
1658 	/* 6. Setup MBUS. */
1659 	icl_mbus_init(dev_priv);
1660 
1661 	/* 7. Program arbiter BW_BUDDY registers */
1662 	if (DISPLAY_VER(dev_priv) >= 12)
1663 		tgl_bw_buddy_init(dev_priv);
1664 
1665 	/* 8. Ensure PHYs have completed calibration and adaptation */
1666 	if (IS_DG2(dev_priv))
1667 		intel_snps_phy_wait_for_calibration(dev_priv);
1668 
1669 	if (resume)
1670 		intel_dmc_load_program(dev_priv);
1671 
1672 	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
1673 	if (DISPLAY_VER(dev_priv) >= 12) {
1674 		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
1675 		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
1676 		intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, val);
1677 	}
1678 
1679 	/* Wa_14011503030:xelpd */
1680 	if (DISPLAY_VER(dev_priv) >= 13)
1681 		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
1682 }
1683 
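/*
 * Reverse of icl_display_core_init(): disable the DC states and the DMC
 * program, then tear down DBUF, CDCLK, power well 1 and the combo PHYs.
 */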
1684 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
1685 {
1686 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1687 	struct i915_power_well *well;
1688 
1689 	if (!HAS_DISPLAY(dev_priv))
1690 		return;
1691 
1692 	gen9_disable_dc_states(dev_priv);
1693 	intel_dmc_disable_program(dev_priv);
1694 
1695 	/* 1. Disable all display engine functions -> already done */
1696 
1697 	/* 2. Disable DBUF */
1698 	gen9_dbuf_disable(dev_priv);
1699 
1700 	/* 3. Disable CD clock */
1701 	intel_cdclk_uninit_hw(dev_priv);
1702 
1703 	/*
1704 	 * 4. Disable Power Well 1 (PG1).
1705 	 *    The AUX IO power wells are toggled on demand, so they are already
1706 	 *    disabled at this point.
1707 	 */
1708 	mutex_lock(&power_domains->lock);
1709 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1710 	intel_power_well_disable(dev_priv, well);
1711 	mutex_unlock(&power_domains->lock);
1712 
1713 	/* 5. Disable combo PHYs */
1714 	intel_combo_phy_uninit(dev_priv);
1715 }
1716 
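/*
 * Reconstruct the shadow copy of DISPLAY_PHY_CONTROL from the current power
 * well and lane status, since the register itself must never be read (see
 * the comment below).
 */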
1717 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
1718 {
1719 	struct i915_power_well *cmn_bc =
1720 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1721 	struct i915_power_well *cmn_d =
1722 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1723 
1724 	/*
1725 	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
1726 	 * workaround never ever read DISPLAY_PHY_CONTROL, and
1727 	 * instead maintain a shadow copy ourselves. Use the actual
1728 	 * power well state and lane status to reconstruct the
1729 	 * expected initial value.
1730 	 */
1731 	dev_priv->display.power.chv_phy_control =
1732 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
1733 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
1734 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
1735 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
1736 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
1737 
1738 	/*
1739 	 * If all lanes are disabled we leave the override disabled
1740 	 * with all power down bits cleared to match the state we
1741 	 * would use after disabling the port. Otherwise enable the
1742 	 * override and set the lane powerdown bits according to the
1743 	 * current lane status.
1744 	 */
1745 	if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
1746 		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
1747 		unsigned int mask;
1748 
1749 		mask = status & DPLL_PORTB_READY_MASK;
1750 		if (mask == 0xf)
1751 			mask = 0x0;
1752 		else
1753 			dev_priv->display.power.chv_phy_control |=
1754 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
1755 
1756 		dev_priv->display.power.chv_phy_control |=
1757 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
1758 
1759 		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
1760 		if (mask == 0xf)
1761 			mask = 0x0;
1762 		else
1763 			dev_priv->display.power.chv_phy_control |=
1764 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
1765 
1766 		dev_priv->display.power.chv_phy_control |=
1767 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
1768 
1769 		dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
1770 
1771 		dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
1772 	} else {
1773 		dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
1774 	}
1775 
1776 	if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
1777 		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
1778 		unsigned int mask;
1779 
1780 		mask = status & DPLL_PORTD_READY_MASK;
1781 
1782 		if (mask == 0xf)
1783 			mask = 0x0;
1784 		else
1785 			dev_priv->display.power.chv_phy_control |=
1786 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
1787 
1788 		dev_priv->display.power.chv_phy_control |=
1789 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
1790 
1791 		dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
1792 
1793 		dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
1794 	} else {
1795 		dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
1796 	}
1797 
1798 	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
1799 		    dev_priv->display.power.chv_phy_control);
1800 
1801 	/* Defer application of initial phy_control to enabling the powerwell */
1802 }
1803 
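/*
 * VLV workaround: assert the display PHY sideband reset by gating the common
 * lane power well here; the reset is de-asserted when the well is enabled
 * again on demand. Skipped if the display already looks active.
 */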
1804 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
1805 {
1806 	struct i915_power_well *cmn =
1807 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1808 	struct i915_power_well *disp2d =
1809 		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
1810 
1811 	/* Skip this if the display might already be active */
1812 	if (intel_power_well_is_enabled(dev_priv, cmn) &&
1813 	    intel_power_well_is_enabled(dev_priv, disp2d) &&
1814 	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
1815 		return;
1816 
1817 	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
1818 
1819 	/* cmnlane needs DPLL registers */
1820 	intel_power_well_enable(dev_priv, disp2d);
1821 
1822 	/*
1823 	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1824 	 * Need to assert and de-assert PHY SB reset by gating the
1825 	 * common lane power, then un-gating it.
1826 	 * Simply ungating isn't enough to reset the PHY enough to get
1827 	 * ports and lanes running.
1828 	 */
1829 	intel_power_well_disable(dev_priv, cmn);
1830 }
1831 
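/* Check via the Punit sideband whether the block behind @reg0 is power gated. */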
1832 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
1833 {
1834 	bool ret;
1835 
1836 	vlv_punit_get(dev_priv);
1837 	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
1838 	vlv_punit_put(dev_priv);
1839 
1840 	return ret;
1841 }
1842 
1843 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
1844 {
1845 	drm_WARN(&dev_priv->drm,
1846 		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
1847 		 "VED not power gated\n");
1848 }
1849 
1850 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
1851 {
1852 	static const struct pci_device_id isp_ids[] = {
1853 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
1854 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
1855 		{}
1856 	};
1857 
1858 	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
1859 		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
1860 		 "ISP not power gated\n");
1861 }
1862 
1863 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
1864 
1865 /**
1866  * intel_power_domains_init_hw - initialize hardware power domain state
1867  * @i915: i915 device instance
1868  * @resume: whether the call is made from a resume code path
1869  *
1870  * This function initializes the hardware power domain state and enables all
1871  * power wells belonging to the INIT power domain. Power wells in other
1872  * domains (and not in the INIT domain) are referenced or disabled by
1873  * intel_modeset_readout_hw_state(). After that the reference count of each
1874  * power well must match its HW enabled state, see
1875  * intel_power_domains_verify_state().
1876  *
1877  * It will return with power domains disabled (to be enabled later by
1878  * intel_power_domains_enable()) and must be paired with
1879  * intel_power_domains_driver_remove().
1880  */
1881 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
1882 {
1883 	struct i915_power_domains *power_domains = &i915->display.power.domains;
1884 
1885 	power_domains->initializing = true;
1886 
1887 	if (DISPLAY_VER(i915) >= 11) {
1888 		icl_display_core_init(i915, resume);
1889 	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
1890 		bxt_display_core_init(i915, resume);
1891 	} else if (DISPLAY_VER(i915) == 9) {
1892 		skl_display_core_init(i915, resume);
1893 	} else if (IS_CHERRYVIEW(i915)) {
1894 		mutex_lock(&power_domains->lock);
1895 		chv_phy_control_init(i915);
1896 		mutex_unlock(&power_domains->lock);
1897 		assert_isp_power_gated(i915);
1898 	} else if (IS_VALLEYVIEW(i915)) {
1899 		mutex_lock(&power_domains->lock);
1900 		vlv_cmnlane_wa(i915);
1901 		mutex_unlock(&power_domains->lock);
1902 		assert_ved_power_gated(i915);
1903 		assert_isp_power_gated(i915);
1904 	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
1905 		hsw_assert_cdclk(i915);
1906 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
1907 	} else if (IS_IVYBRIDGE(i915)) {
1908 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
1909 	}
1910 
1911 	/*
1912 	 * Keep all power wells enabled for any dependent HW access during
1913 	 * initialization and to make sure we keep BIOS enabled display HW
1914 	 * resources powered until display HW readout is complete. We drop
1915 	 * this reference in intel_power_domains_enable().
1916 	 */
1917 	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
1918 	power_domains->init_wakeref =
1919 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
1920 
1921 	/* Keep the power wells always on if the user disabled power well support. */
1922 	if (!i915->params.disable_power_well) {
1923 		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
1924 		i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
1925 										      POWER_DOMAIN_INIT);
1926 	}
1927 	intel_power_domains_sync_hw(i915);
1928 
1929 	power_domains->initializing = false;
1930 }
1931 
1932 /**
1933  * intel_power_domains_driver_remove - deinitialize hw power domain state
1934  * @i915: i915 device instance
1935  *
1936  * De-initializes the display power domain HW state. It also ensures that the
1937  * device stays powered up so that the driver can be reloaded.
1938  *
1939  * It must be called with power domains already disabled (after a call to
1940  * intel_power_domains_disable()) and must be paired with
1941  * intel_power_domains_init_hw().
1942  */
1943 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
1944 {
1945 	intel_wakeref_t wakeref __maybe_unused =
1946 		fetch_and_zero(&i915->display.power.domains.init_wakeref);
1947 
1948 	/* Remove the refcount we took to keep power well support disabled. */
1949 	if (!i915->params.disable_power_well)
1950 		intel_display_power_put(i915, POWER_DOMAIN_INIT,
1951 					fetch_and_zero(&i915->display.power.domains.disable_wakeref));
1952 
1953 	intel_display_power_flush_work_sync(i915);
1954 
1955 	intel_power_domains_verify_state(i915);
1956 
1957 	/* Keep the power well enabled, but cancel its rpm wakeref. */
1958 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1959 }
1960 
1961 /**
1962  * intel_power_domains_sanitize_state - sanitize power domains state
1963  * @i915: i915 device instance
1964  *
1965  * Sanitize the power domains state during driver loading and system resume.
1966  * The function will disable all display power wells that BIOS has enabled
1967  * without a user for them (any user of a power well has taken a reference
1968  * on it by the time this function is called, after the state of all the
1969  * pipe, encoder, etc. HW resources have been sanitized).
1970  */
1971 void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
1972 {
1973 	struct i915_power_domains *power_domains = &i915->display.power.domains;
1974 	struct i915_power_well *power_well;
1975 
1976 	mutex_lock(&power_domains->lock);
1977 
1978 	for_each_power_well_reverse(i915, power_well) {
1979 		if (power_well->desc->always_on || power_well->count ||
1980 		    !intel_power_well_is_enabled(i915, power_well))
1981 			continue;
1982 
1983 		drm_dbg_kms(&i915->drm,
1984 			    "BIOS left unused %s power well enabled, disabling it\n",
1985 			    intel_power_well_name(power_well));
1986 		intel_power_well_disable(i915, power_well);
1987 	}
1988 
1989 	mutex_unlock(&power_domains->lock);
1990 }
1991 
1992 /**
1993  * intel_power_domains_enable - enable toggling of display power wells
1994  * @i915: i915 device instance
1995  *
1996  * Enable the on-demand enabling/disabling of the display power wells. Note
1997  * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
1998  * toggled only at specific points of the display modeset sequence, thus they
1999  * are not affected by the intel_power_domains_enable()/disable() calls. The
2000  * purpose of these functions is to keep the rest of the power wells enabled
2001  * until the end of display HW readout (which will acquire the power
2002  * references reflecting the current HW state).
2003  */
2004 void intel_power_domains_enable(struct drm_i915_private *i915)
2005 {
2006 	intel_wakeref_t wakeref __maybe_unused =
2007 		fetch_and_zero(&i915->display.power.domains.init_wakeref);
2008 
2009 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2010 	intel_power_domains_verify_state(i915);
2011 }
2012 
2013 /**
2014  * intel_power_domains_disable - disable toggling of display power wells
2015  * @i915: i915 device instance
2016  *
2017  * Disable the on-demand enabling/disabling of the display power wells. See
2018  * intel_power_domains_enable() for which power wells this call controls.
2019  */
2020 void intel_power_domains_disable(struct drm_i915_private *i915)
2021 {
2022 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2023 
2024 	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
2025 	power_domains->init_wakeref =
2026 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
2027 
2028 	intel_power_domains_verify_state(i915);
2029 }
2030 
2031 /**
2032  * intel_power_domains_suspend - suspend power domain state
2033  * @i915: i915 device instance
2034  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
2035  *
2036  * This function prepares the hardware power domain state before entering
2037  * system suspend.
2038  *
2039  * It must be called with power domains already disabled (after a call to
2040  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
2041  */
2042 void intel_power_domains_suspend(struct drm_i915_private *i915,
2043 				 enum i915_drm_suspend_mode suspend_mode)
2044 {
2045 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2046 	intel_wakeref_t wakeref __maybe_unused =
2047 		fetch_and_zero(&power_domains->init_wakeref);
2048 
2049 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2050 
2051 	/*
2052 	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
2053 	 * support, don't manually deinit the power domains. This also means the
2054 	 * DMC firmware will stay active; it will power down any HW
2055 	 * resources as required and also enable deeper system power states
2056 	 * that would be blocked if the firmware was inactive.
2057 	 */
2058 	if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
2059 	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
2060 	    intel_dmc_has_payload(i915)) {
2061 		intel_display_power_flush_work(i915);
2062 		intel_power_domains_verify_state(i915);
2063 		return;
2064 	}
2065 
2066 	/*
2067 	 * Even if power well support was disabled we still want to disable
2068 	 * power wells if power domains must be deinitialized for suspend.
2069 	 */
2070 	if (!i915->params.disable_power_well)
2071 		intel_display_power_put(i915, POWER_DOMAIN_INIT,
2072 					fetch_and_zero(&i915->display.power.domains.disable_wakeref));
2073 
2074 	intel_display_power_flush_work(i915);
2075 	intel_power_domains_verify_state(i915);
2076 
2077 	if (DISPLAY_VER(i915) >= 11)
2078 		icl_display_core_uninit(i915);
2079 	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
2080 		bxt_display_core_uninit(i915);
2081 	else if (DISPLAY_VER(i915) == 9)
2082 		skl_display_core_uninit(i915);
2083 
2084 	power_domains->display_core_suspended = true;
2085 }
2086 
2087 /**
2088  * intel_power_domains_resume - resume power domain state
2089  * @i915: i915 device instance
2090  *
2091  * This function resumes the hardware power domain state during system resume.
2092  *
2093  * It will return with power domain support disabled (to be enabled later by
2094  * intel_power_domains_enable()) and must be paired with
2095  * intel_power_domains_suspend().
2096  */
2097 void intel_power_domains_resume(struct drm_i915_private *i915)
2098 {
2099 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2100 
2101 	if (power_domains->display_core_suspended) {
2102 		intel_power_domains_init_hw(i915, true);
2103 		power_domains->display_core_suspended = false;
2104 	} else {
2105 		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
2106 		power_domains->init_wakeref =
2107 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
2108 	}
2109 
2110 	intel_power_domains_verify_state(i915);
2111 }
2112 
2113 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2114 
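/* Dump each power well's refcount and the use counts of its power domains. */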
2115 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
2116 {
2117 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2118 	struct i915_power_well *power_well;
2119 
2120 	for_each_power_well(i915, power_well) {
2121 		enum intel_display_power_domain domain;
2122 
2123 		drm_dbg(&i915->drm, "%-25s %d\n",
2124 			intel_power_well_name(power_well), intel_power_well_refcount(power_well));
2125 
2126 		for_each_power_domain(domain, intel_power_well_domains(power_well))
2127 			drm_dbg(&i915->drm, "  %-23s %d\n",
2128 				intel_display_power_domain_str(domain),
2129 				power_domains->domain_use_count[domain]);
2130 	}
2131 }
2132 
2133 /**
2134  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
2135  * @i915: i915 device instance
2136  *
2137  * Verify if the reference count of each power well matches its HW enabled
2138  * state and the total refcount of the domains it belongs to. This must be
2139  * called after modeset HW state sanitization, which is responsible for
2140  * acquiring reference counts for any power wells in use and disabling the
2141  * ones left on by BIOS but not required by any active output.
2142  */
2143 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
2144 {
2145 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2146 	struct i915_power_well *power_well;
2147 	bool dump_domain_info;
2148 
2149 	mutex_lock(&power_domains->lock);
2150 
2151 	verify_async_put_domains_state(power_domains);
2152 
2153 	dump_domain_info = false;
2154 	for_each_power_well(i915, power_well) {
2155 		enum intel_display_power_domain domain;
2156 		int domains_count;
2157 		bool enabled;
2158 
2159 		enabled = intel_power_well_is_enabled(i915, power_well);
2160 		if ((intel_power_well_refcount(power_well) ||
2161 		     intel_power_well_is_always_on(power_well)) !=
2162 		    enabled)
2163 			drm_err(&i915->drm,
2164 				"power well %s state mismatch (refcount %d/enabled %d)\n",
2165 				intel_power_well_name(power_well),
2166 				intel_power_well_refcount(power_well), enabled);
2167 
2168 		domains_count = 0;
2169 		for_each_power_domain(domain, intel_power_well_domains(power_well))
2170 			domains_count += power_domains->domain_use_count[domain];
2171 
2172 		if (intel_power_well_refcount(power_well) != domains_count) {
2173 			drm_err(&i915->drm,
2174 				"power well %s refcount/domain refcount mismatch "
2175 				"(refcount %d/domains refcount %d)\n",
2176 				intel_power_well_name(power_well),
2177 				intel_power_well_refcount(power_well),
2178 				domains_count);
2179 			dump_domain_info = true;
2180 		}
2181 	}
2182 
2183 	if (dump_domain_info) {
2184 		static bool dumped;
2185 
2186 		if (!dumped) {
2187 			intel_power_domains_dump_info(i915);
2188 			dumped = true;
2189 		}
2190 	}
2191 
2192 	mutex_unlock(&power_domains->lock);
2193 }
2194 
2195 #else
2196 
2197 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
2198 {
2199 }
2200 
2201 #endif
2202 
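/*
 * Late suspend hook: enter DC9 (display 11+, GLK, BXT) or PC8 (HSW/BDW) and
 * apply the Wa_14010685332 SBCLK/refclk tweak on the affected PCH types.
 */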
2203 void intel_display_power_suspend_late(struct drm_i915_private *i915)
2204 {
2205 	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
2206 	    IS_BROXTON(i915)) {
2207 		bxt_enable_dc9(i915);
2208 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2209 		hsw_enable_pc8(i915);
2210 	}
2211 
2212 	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
2213 	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
2214 		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
2215 }
2216 
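/*
 * Early resume counterpart of intel_display_power_suspend_late(): sanitize
 * the DC state, exit DC9 or PC8, and undo the Wa_14010685332 tweak.
 */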
2217 void intel_display_power_resume_early(struct drm_i915_private *i915)
2218 {
2219 	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
2220 	    IS_BROXTON(i915)) {
2221 		gen9_sanitize_dc_state(i915);
2222 		bxt_disable_dc9(i915);
2223 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2224 		hsw_disable_pc8(i915);
2225 	}
2226 
2227 	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
2228 	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
2229 		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
2230 }
2231 
2232 void intel_display_power_suspend(struct drm_i915_private *i915)
2233 {
2234 	if (DISPLAY_VER(i915) >= 11) {
2235 		icl_display_core_uninit(i915);
2236 		bxt_enable_dc9(i915);
2237 	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
2238 		bxt_display_core_uninit(i915);
2239 		bxt_enable_dc9(i915);
2240 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2241 		hsw_enable_pc8(i915);
2242 	}
2243 }
2244 
2245 void intel_display_power_resume(struct drm_i915_private *i915)
2246 {
2247 	if (DISPLAY_VER(i915) >= 11) {
2248 		bxt_disable_dc9(i915);
2249 		icl_display_core_init(i915, true);
2250 		if (intel_dmc_has_payload(i915)) {
2251 			if (i915->display.dmc.allowed_dc_mask &
2252 			    DC_STATE_EN_UPTO_DC6)
2253 				skl_enable_dc6(i915);
2254 			else if (i915->display.dmc.allowed_dc_mask &
2255 				 DC_STATE_EN_UPTO_DC5)
2256 				gen9_enable_dc5(i915);
2257 		}
2258 	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
2259 		bxt_disable_dc9(i915);
2260 		bxt_display_core_init(i915, true);
2261 		if (intel_dmc_has_payload(i915) &&
2262 		    (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
2263 			gen9_enable_dc5(i915);
2264 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2265 		hsw_disable_pc8(i915);
2266 	}
2267 }
2268 
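/*
 * Print every power well's reference count and the use counts of its power
 * domains to @m (typically a debugfs seq_file).
 */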
2269 void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
2270 {
2271 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2272 	int i;
2273 
2274 	mutex_lock(&power_domains->lock);
2275 
2276 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2277 	for (i = 0; i < power_domains->power_well_count; i++) {
2278 		struct i915_power_well *power_well;
2279 		enum intel_display_power_domain power_domain;
2280 
2281 		power_well = &power_domains->power_wells[i];
2282 		seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
2283 			   intel_power_well_refcount(power_well));
2284 
2285 		for_each_power_domain(power_domain, intel_power_well_domains(power_well))
2286 			seq_printf(m, "  %-23s %d\n",
2287 				   intel_display_power_domain_str(power_domain),
2288 				   power_domains->domain_use_count[power_domain]);
2289 	}
2290 
2291 	mutex_unlock(&power_domains->lock);
2292 }
2293 
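/*
 * Describes a contiguous range of DDI ports/AUX channels whose power domains
 * are laid out contiguously as well, so the domain for a given port or AUX
 * channel is the range's base domain plus the offset from the range start.
 */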
2294 struct intel_ddi_port_domains {
2295 	enum port port_start;
2296 	enum port port_end;
2297 	enum aux_ch aux_ch_start;
2298 	enum aux_ch aux_ch_end;
2299 
2300 	enum intel_display_power_domain ddi_lanes;
2301 	enum intel_display_power_domain ddi_io;
2302 	enum intel_display_power_domain aux_io;
2303 	enum intel_display_power_domain aux_legacy_usbc;
2304 	enum intel_display_power_domain aux_tbt;
2305 };
2306 
2307 static const struct intel_ddi_port_domains
2308 i9xx_port_domains[] = {
2309 	{
2310 		.port_start = PORT_A,
2311 		.port_end = PORT_F,
2312 		.aux_ch_start = AUX_CH_A,
2313 		.aux_ch_end = AUX_CH_F,
2314 
2315 		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2316 		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2317 		.aux_io = POWER_DOMAIN_AUX_IO_A,
2318 		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2319 		.aux_tbt = POWER_DOMAIN_INVALID,
2320 	},
2321 };
2322 
2323 static const struct intel_ddi_port_domains
2324 d11_port_domains[] = {
2325 	{
2326 		.port_start = PORT_A,
2327 		.port_end = PORT_B,
2328 		.aux_ch_start = AUX_CH_A,
2329 		.aux_ch_end = AUX_CH_B,
2330 
2331 		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2332 		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2333 		.aux_io = POWER_DOMAIN_AUX_IO_A,
2334 		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2335 		.aux_tbt = POWER_DOMAIN_INVALID,
2336 	}, {
2337 		.port_start = PORT_C,
2338 		.port_end = PORT_F,
2339 		.aux_ch_start = AUX_CH_C,
2340 		.aux_ch_end = AUX_CH_F,
2341 
2342 		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
2343 		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
2344 		.aux_io = POWER_DOMAIN_AUX_IO_C,
2345 		.aux_legacy_usbc = POWER_DOMAIN_AUX_C,
2346 		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
2347 	},
2348 };
2349 
2350 static const struct intel_ddi_port_domains
2351 d12_port_domains[] = {
2352 	{
2353 		.port_start = PORT_A,
2354 		.port_end = PORT_C,
2355 		.aux_ch_start = AUX_CH_A,
2356 		.aux_ch_end = AUX_CH_C,
2357 
2358 		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2359 		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2360 		.aux_io = POWER_DOMAIN_AUX_IO_A,
2361 		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2362 		.aux_tbt = POWER_DOMAIN_INVALID,
2363 	}, {
2364 		.port_start = PORT_TC1,
2365 		.port_end = PORT_TC6,
2366 		.aux_ch_start = AUX_CH_USBC1,
2367 		.aux_ch_end = AUX_CH_USBC6,
2368 
2369 		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
2370 		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
2371 		.aux_io = POWER_DOMAIN_INVALID,
2372 		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
2373 		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
2374 	},
2375 };
2376 
2377 static const struct intel_ddi_port_domains
2378 d13_port_domains[] = {
2379 	{
2380 		.port_start = PORT_A,
2381 		.port_end = PORT_C,
2382 		.aux_ch_start = AUX_CH_A,
2383 		.aux_ch_end = AUX_CH_C,
2384 
2385 		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2386 		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2387 		.aux_io = POWER_DOMAIN_AUX_IO_A,
2388 		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2389 		.aux_tbt = POWER_DOMAIN_INVALID,
2390 	}, {
2391 		.port_start = PORT_TC1,
2392 		.port_end = PORT_TC4,
2393 		.aux_ch_start = AUX_CH_USBC1,
2394 		.aux_ch_end = AUX_CH_USBC4,
2395 
2396 		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
2397 		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
2398 		.aux_io = POWER_DOMAIN_INVALID,
2399 		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
2400 		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
2401 	}, {
2402 		.port_start = PORT_D_XELPD,
2403 		.port_end = PORT_E_XELPD,
2404 		.aux_ch_start = AUX_CH_D_XELPD,
2405 		.aux_ch_end = AUX_CH_E_XELPD,
2406 
2407 		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
2408 		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
2409 		.aux_io = POWER_DOMAIN_AUX_IO_D,
2410 		.aux_legacy_usbc = POWER_DOMAIN_AUX_D,
2411 		.aux_tbt = POWER_DOMAIN_INVALID,
2412 	},
2413 };
2414 
2415 static void
2416 intel_port_domains_for_platform(struct drm_i915_private *i915,
2417 				const struct intel_ddi_port_domains **domains,
2418 				int *domains_size)
2419 {
2420 	if (DISPLAY_VER(i915) >= 13) {
2421 		*domains = d13_port_domains;
2422 		*domains_size = ARRAY_SIZE(d13_port_domains);
2423 	} else if (DISPLAY_VER(i915) >= 12) {
2424 		*domains = d12_port_domains;
2425 		*domains_size = ARRAY_SIZE(d12_port_domains);
2426 	} else if (DISPLAY_VER(i915) >= 11) {
2427 		*domains = d11_port_domains;
2428 		*domains_size = ARRAY_SIZE(d11_port_domains);
2429 	} else {
2430 		*domains = i9xx_port_domains;
2431 		*domains_size = ARRAY_SIZE(i9xx_port_domains);
2432 	}
2433 }
2434 
2435 static const struct intel_ddi_port_domains *
2436 intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
2437 {
2438 	const struct intel_ddi_port_domains *domains;
2439 	int domains_size;
2440 	int i;
2441 
2442 	intel_port_domains_for_platform(i915, &domains, &domains_size);
2443 	for (i = 0; i < domains_size; i++)
2444 		if (port >= domains[i].port_start && port <= domains[i].port_end)
2445 			return &domains[i];
2446 
2447 	return NULL;
2448 }
2449 
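/*
 * The intel_display_power_*_domain() helpers below map a port or AUX channel
 * to the corresponding power domain using the per-platform tables above,
 * falling back to the first domain of the respective kind (with a WARN) if
 * the lookup fails.
 */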
2450 enum intel_display_power_domain
2451 intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
2452 {
2453 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
2454 
2455 	if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
2456 		return POWER_DOMAIN_PORT_DDI_IO_A;
2457 
2458 	return domains->ddi_io + (int)(port - domains->port_start);
2459 }
2460 
2461 enum intel_display_power_domain
2462 intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
2463 {
2464 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
2465 
2466 	if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
2467 		return POWER_DOMAIN_PORT_DDI_LANES_A;
2468 
2469 	return domains->ddi_lanes + (int)(port - domains->port_start);
2470 }
2471 
2472 static const struct intel_ddi_port_domains *
2473 intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
2474 {
2475 	const struct intel_ddi_port_domains *domains;
2476 	int domains_size;
2477 	int i;
2478 
2479 	intel_port_domains_for_platform(i915, &domains, &domains_size);
2480 	for (i = 0; i < domains_size; i++)
2481 		if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
2482 			return &domains[i];
2483 
2484 	return NULL;
2485 }
2486 
2487 enum intel_display_power_domain
2488 intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
2489 {
2490 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
2491 
2492 	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
2493 		return POWER_DOMAIN_AUX_IO_A;
2494 
2495 	return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
2496 }
2497 
2498 enum intel_display_power_domain
2499 intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
2500 {
2501 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
2502 
2503 	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
2504 		return POWER_DOMAIN_AUX_A;
2505 
2506 	return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
2507 }
2508 
2509 enum intel_display_power_domain
2510 intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
2511 {
2512 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
2513 
2514 	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
2515 		return POWER_DOMAIN_AUX_TBT1;
2516 
2517 	return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
2518 }
2519