/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "display/intel_crt.h"
#include "display/intel_dp.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_csr.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_G_LANES:
		return "PORT_DDI_G_LANES";
	case POWER_DOMAIN_PORT_DDI_H_LANES:
		return "PORT_DDI_H_LANES";
	case POWER_DOMAIN_PORT_DDI_I_LANES:
		return "PORT_DDI_I_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_G_IO:
		return "PORT_DDI_G_IO";
	case POWER_DOMAIN_PORT_DDI_H_IO:
		return "PORT_DDI_H_IO";
	case POWER_DOMAIN_PORT_DDI_I_IO:
		return "PORT_DDI_I_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_G:
		return "AUX_G";
	case POWER_DOMAIN_AUX_H:
		return "AUX_H";
	case POWER_DOMAIN_AUX_I:
		return "AUX_I";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_C_TBT:
		return "AUX_C_TBT";
	case POWER_DOMAIN_AUX_D_TBT:
		return "AUX_D_TBT";
	case POWER_DOMAIN_AUX_E_TBT:
		return "AUX_E_TBT";
	case POWER_DOMAIN_AUX_F_TBT:
		return "AUX_F_TBT";
	case POWER_DOMAIN_AUX_G_TBT:
		return "AUX_G_TBT";
	case POWER_DOMAIN_AUX_H_TBT:
		return "AUX_H_TBT";
	case POWER_DOMAIN_AUX_I_TBT:
		return "AUX_I_TBT";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DPLL_DC_OFF:
		return "DPLL_DC_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}

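/*
 * An illustrative get/put sequence (not an actual driver path): the wrappers
 * above only touch the hardware on the 0 <-> 1 count transitions, so nested
 * grabs of the same well cost a single enable/disable pair:
 *
 *	intel_power_well_get(i915, power_well);	// count 0->1: ops->enable()
 *	intel_power_well_get(i915, power_well);	// count 1->2: no HW access
 *	intel_power_well_put(i915, power_well);	// count 2->1: no HW access
 *	intel_power_well_put(i915, power_well);	// count 1->0: ops->disable()
 */
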
/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

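/*
 * A sketch of the intended readout-only usage (hypothetical helper, pipe A
 * chosen arbitrarily): check the domain state before peeking at registers.
 * Anything outside state readout should hold a reference via
 * intel_display_power_get() instead of relying on this check:
 *
 *	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		active = I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE;
 */
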
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		DRM_DEBUG_KMS("%s power well enable timeout\n",
			      power_well->desc->name);

		/* An AUX timeout is expected if the TBT DP tunnel is down. */
		WARN_ON(!power_well->desc->hsw.is_tc_tbt);
	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
	ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
	ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;

	return ret;
}

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(I915_READ(regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->desc->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
				      SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool wait_fuses = power_well->desc->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#define ICL_AUX_PW_TO_PHY(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	WARN_ON(!IS_ICELAKE(dev_priv));

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (INTEL_GEN(dev_priv) < 12) {
		val = I915_READ(ICL_PORT_CL_DW12(phy));
		I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	WARN_ON(!IS_ICELAKE(dev_priv));

	val = I915_READ(ICL_PORT_CL_DW12(phy));
	I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
						 ICL_AUX_PW_TO_CH(pw_idx);
}

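/*
 * A worked example of the two mappings above: the legacy macro sends
 * ICL_PW_CTL_IDX_AUX_B to AUX_CH_B, while the TBT variant sends
 * ICL_PW_CTL_IDX_AUX_TBT1 to AUX_CH_C, i.e. the TBT1..TBT4 power wells
 * alias the C..F AUX channels on ICL.
 */
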
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	WARN_ON(refs > power_well->count);

	return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		if (!intel_phy_is_tc(dev_priv, phy))
			continue;

		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);
		if (WARN_ON(!dig_port))
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	if (WARN_ON(!dig_port))
		return;

	WARN_ON(!intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well);

	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (power_well->desc->hsw.is_tc_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);

	hsw_power_well_enable(dev_priv, power_well);

	if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
		I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			DRM_WARN("Timeout waiting for TC uC health\n");
	}
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	icl_tc_port_assert_ref_held(dev_priv, power_well);

	hsw_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= I915_READ(regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning the old value. Make sure
	 * the write really sticks by reading it back enough times, and
	 * force a rewrite until we are confident that the state is
	 * exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time we need one retry; avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (INTEL_GEN(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
					  | DC_STATE_EN_DC9;
	else if (IS_GEN(dev_priv, 11))
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEN9_LP(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * requesting it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->csr.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}

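/*
 * Worked fallback example for the sanitizer above: a request for
 * DC_STATE_EN_UPTO_DC6 on a platform whose allowed_dc_mask only covers DC5
 * steps down to DC_STATE_EN_UPTO_DC5; if that isn't allowed either it
 * degrades to DC_STATE_EN_DC3CO and finally to DC_STATE_DISABLE, which is
 * always accepted.
 */
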
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	u32 val;

	DRM_DEBUG_KMS("Disabling DC3CO\n");
	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_DC3CO_STATUS;
	I915_WRITE(DC_STATE_EN, val);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Delay of 200us for DC3CO exit time, Bspec 49196
	 */
	usleep_range(200, 210);
}

static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state; based upon
 * this target_dc_state, the "DC off" power well will enable the desired
 * DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (WARN_ON(!power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->csr.target_dc_state)
		goto unlock;

	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
							   power_well);
	/*
	 * If DC off power well is disabled, need to enable and disable the
	 * DC off power well to effect target DC state.
	 */
	if (!dc_off_enabled)
		power_well->desc->ops->enable(dev_priv, power_well);

	dev_priv->csr.target_dc_state = state;

	if (!dc_off_enabled)
		power_well->desc->ops->disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}

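/*
 * Illustrative call site (hypothetical, modeled on PSR-style use of DC3CO):
 * a feature that can only tolerate a shallow DC state while active retargets
 * the "DC off" well, then restores the deeper state when done:
 *
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 *	... feature active ...
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 */
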
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_csr_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = I915_READ(regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = I915_READ(regs->driver);

		if (!(drv_req & mask))
			I915_WRITE(regs->driver, drv_req | mask);
		I915_WRITE(regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power state (0x%08x)\n", tmp);
}

static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_state cdclk_state = {};

	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A; the HW context for
		 * the other combo PHYs (port B) is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	switch (dev_priv->csr.target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

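/*
 * The request/ack handshake above in short (restating the code, not a new
 * protocol): the driver's request lives in PUNIT_REG_PWRGT_CTRL and the
 * Punit reports the resulting state in PUNIT_REG_PWRGT_STATUS, so e.g. a
 * power-on request completes once
 *
 *	(vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) &
 *	 PUNIT_PWRGT_MASK(pw_idx)) == PUNIT_PWRGT_PWR_ON(pw_idx)
 */
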
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be initialized explicitly anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *       be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
{
	return power_domains->async_put_domains[0] |
	       power_domains->async_put_domains[1];
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	return !WARN_ON(power_domains->async_put_domains[0] &
			power_domains->async_put_domains[1]);
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	err |= WARN_ON(!!power_domains->async_put_wakeref !=
		       !!__async_put_domains_mask(power_domains));

	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);

	return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, u64 mask)
{
	enum intel_display_power_domain domain;

	DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
	for_each_power_domain(domain, mask)
		DRM_DEBUG_DRIVER("%s use_count %d\n",
				 intel_display_power_domain_str(domain),
				 power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
			 power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */

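/*
 * A small sketch of the invariant the helpers below rely on (assuming the
 * disjointness assert holds): a domain bit is set in at most one of the two
 * pending masks, so the combined mask and the clear operation stay simple:
 *
 *	pending = async_put_domains_mask(power_domains);	// [0] | [1]
 *	async_put_domains_clear_domain(power_domains, domain);	// clears both
 */
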

static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	return __async_put_domains_mask(power_domains);
}

static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
}

static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool ret = false;

	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	if (async_put_domains_mask(power_domains))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the domain
 * is already enabled; unlike intel_display_power_get() it will not power the
 * domain up if it is currently disabled. This makes it suitable for hardware
 * state readout, where the current power state must not be changed.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
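 *
 * Returns:
 * A wakeref to be passed to intel_display_power_put() if the domain was
 * enabled and a reference was grabbed, 0 otherwise.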
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return 0;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}

static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);

	power_domains = &dev_priv->power_domains;

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     name);
	WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
	     "Async disabling of domain %s is pending\n",
	     name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to put the reference for
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_display_power_put() instead.
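 *
 * A minimal sketch of the preferred, checked pattern (illustrative only):
 *
 *	wakeref = intel_display_power_get(dev_priv, domain);
 *	... access the hardware block ...
 *	intel_display_power_put(dev_priv, domain, wakeref);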
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}

static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	WARN_ON(power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	WARN_ON(!queue_delayed_work(system_unbound_wq,
				    &power_domains->async_put_work,
				    msecs_to_jiffies(100)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must already hold a raw wakeref; upgrade that to a
	 * proper wakeref to make the state checker happy about the HW access
	 * during power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (power_domains->async_put_domains[1]) {
		power_domains->async_put_domains[0] =
			fetch_and_zero(&power_domains->async_put_domains[1]);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to put the reference for
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
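 *
 * The actual power down happens from a delayed work (queued with a 100 ms
 * delay, see queue_async_put_domains_work() above), so a power get for the
 * same domain shortly afterwards can take over the still-held reference
 * instead of toggling the hardware off and on again.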
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	WARN_ON(power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		power_domains->async_put_domains[1] |= BIT_ULL(domain);
	} else {
		power_domains->async_put_domains[0] |= BIT_ULL(domain);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  async_put_domains_mask(power_domains));
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	WARN_ON(power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to put the reference for
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#endif
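
/*
 * Power domain masks for the power well descriptors below. Each mask ORs
 * together the BIT_ULL() of every domain the given well feeds. Note that most
 * masks also include POWER_DOMAIN_INIT, so that the well stays enabled while
 * the driver holds its init-time reference on that domain (see
 * intel_power_domains_init_hw() further down in this file).
 */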

#define I830_PIPES_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define HSW_DISPLAY_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_AUX_F) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_F) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))

/*
 * ICL PW_0/PG_0 domains (HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - central power except FBC
 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
 * ICL PW_1/PG_1 domains (HW/DMC control):
 * - DBUF function
 * - PIPE_A and its planes, except VGA
 * - transcoder EDP + PSR
 * - transcoder DSI
 * - DDI_A
 * - FBC
 */
#define ICL_PW_4_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_INIT))
	/* VDSC/joining */
#define ICL_PW_3_POWER_DOMAINS ( \
	ICL_PW_4_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_AUX_E) | \
	BIT_ULL(POWER_DOMAIN_AUX_F) | \
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - transcoder WD
	 * - KVMR (HW control)
	 */
#define ICL_PW_2_POWER_DOMAINS ( \
	ICL_PW_3_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - KVMR (HW control)
	 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	ICL_PW_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define ICL_DDI_IO_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

#define ICL_AUX_A_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))

#define TGL_PW_5_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_D) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_4_POWER_DOMAINS ( \
	TGL_PW_5_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_3_POWER_DOMAINS ( \
	TGL_PW_4_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_AUX_E) | \
	BIT_ULL(POWER_DOMAIN_AUX_F) | \
	BIT_ULL(POWER_DOMAIN_AUX_G) | \
	BIT_ULL(POWER_DOMAIN_AUX_H) | \
	BIT_ULL(POWER_DOMAIN_AUX_I) | \
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_2_POWER_DOMAINS ( \
	TGL_PW_3_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	TGL_PW_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))

#define TGL_AUX_A_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define TGL_AUX_B_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define TGL_AUX_C_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_G))
#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_H))
#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_I))
#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
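
/*
 * Platform power well descriptor lists follow. Note that list order matters:
 * for a given domain the wells are enabled first-to-last (see
 * __intel_display_power_get_domain()) and disabled last-to-first (see
 * __intel_display_power_put_domain()), so a well should be listed after any
 * well it depends on.
 */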

static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};

static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios = HSW_PWR_WELL_CTL1,
	.driver = HSW_PWR_WELL_CTL2,
	.kvmr = HSW_PWR_WELL_CTL3,
	.debug = HSW_PWR_WELL_CTL4,
};

static const struct i915_power_well_desc hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};

static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
};

static const struct i915_power_well_desc bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};

static const struct i915_power_well_desc glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DISP_PW_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
};

static const struct i915_power_well_desc cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
		},
	},
};

static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_combo_phy_aux_power_well_enable,
	.disable = icl_combo_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_tc_phy_aux_power_well_enable,
	.disable = icl_tc_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_AUX1,
	.driver = ICL_PWR_WELL_CTL_AUX2,
	.debug = ICL_PWR_WELL_CTL_AUX4,
};

static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_DDI1,
	.driver = ICL_PWR_WELL_CTL_DDI2,
	.debug = ICL_PWR_WELL_CTL_DDI4,
};

static const struct i915_power_well_desc icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
		},
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C TC1",
		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX D TC2",
		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E TC3",
		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F TC4",
		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX C TBT1",
		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX D TBT2",
		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX E TBT3",
		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
= true, 3659 }, 3660 }, 3661 { 3662 .name = "AUX F TBT4", 3663 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, 3664 .ops = &icl_tc_phy_aux_power_well_ops, 3665 .id = DISP_PW_ID_NONE, 3666 { 3667 .hsw.regs = &icl_aux_power_well_regs, 3668 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, 3669 .hsw.is_tc_tbt = true, 3670 }, 3671 }, 3672 { 3673 .name = "power well 4", 3674 .domains = ICL_PW_4_POWER_DOMAINS, 3675 .ops = &hsw_power_well_ops, 3676 .id = DISP_PW_ID_NONE, 3677 { 3678 .hsw.regs = &hsw_power_well_regs, 3679 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 3680 .hsw.has_fuses = true, 3681 .hsw.irq_pipe_mask = BIT(PIPE_C), 3682 }, 3683 }, 3684 }; 3685 3686 static const struct i915_power_well_desc ehl_power_wells[] = { 3687 { 3688 .name = "always-on", 3689 .always_on = true, 3690 .domains = POWER_DOMAIN_MASK, 3691 .ops = &i9xx_always_on_power_well_ops, 3692 .id = DISP_PW_ID_NONE, 3693 }, 3694 { 3695 .name = "power well 1", 3696 /* Handled by the DMC firmware */ 3697 .always_on = true, 3698 .domains = 0, 3699 .ops = &hsw_power_well_ops, 3700 .id = SKL_DISP_PW_1, 3701 { 3702 .hsw.regs = &hsw_power_well_regs, 3703 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3704 .hsw.has_fuses = true, 3705 }, 3706 }, 3707 { 3708 .name = "DC off", 3709 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, 3710 .ops = &gen9_dc_off_power_well_ops, 3711 .id = SKL_DISP_DC_OFF, 3712 }, 3713 { 3714 .name = "power well 2", 3715 .domains = ICL_PW_2_POWER_DOMAINS, 3716 .ops = &hsw_power_well_ops, 3717 .id = SKL_DISP_PW_2, 3718 { 3719 .hsw.regs = &hsw_power_well_regs, 3720 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3721 .hsw.has_fuses = true, 3722 }, 3723 }, 3724 { 3725 .name = "power well 3", 3726 .domains = ICL_PW_3_POWER_DOMAINS, 3727 .ops = &hsw_power_well_ops, 3728 .id = DISP_PW_ID_NONE, 3729 { 3730 .hsw.regs = &hsw_power_well_regs, 3731 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3732 .hsw.irq_pipe_mask = BIT(PIPE_B), 3733 .hsw.has_vga = true, 3734 .hsw.has_fuses = true, 3735 }, 3736 }, 3737 { 3738 .name = "DDI A IO", 3739 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3740 .ops = &hsw_power_well_ops, 3741 .id = DISP_PW_ID_NONE, 3742 { 3743 .hsw.regs = &icl_ddi_power_well_regs, 3744 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3745 }, 3746 }, 3747 { 3748 .name = "DDI B IO", 3749 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3750 .ops = &hsw_power_well_ops, 3751 .id = DISP_PW_ID_NONE, 3752 { 3753 .hsw.regs = &icl_ddi_power_well_regs, 3754 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3755 }, 3756 }, 3757 { 3758 .name = "DDI C IO", 3759 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3760 .ops = &hsw_power_well_ops, 3761 .id = DISP_PW_ID_NONE, 3762 { 3763 .hsw.regs = &icl_ddi_power_well_regs, 3764 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3765 }, 3766 }, 3767 { 3768 .name = "DDI D IO", 3769 .domains = ICL_DDI_IO_D_POWER_DOMAINS, 3770 .ops = &hsw_power_well_ops, 3771 .id = DISP_PW_ID_NONE, 3772 { 3773 .hsw.regs = &icl_ddi_power_well_regs, 3774 .hsw.idx = ICL_PW_CTL_IDX_DDI_D, 3775 }, 3776 }, 3777 { 3778 .name = "AUX A", 3779 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 3780 .ops = &hsw_power_well_ops, 3781 .id = DISP_PW_ID_NONE, 3782 { 3783 .hsw.regs = &icl_aux_power_well_regs, 3784 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3785 }, 3786 }, 3787 { 3788 .name = "AUX B", 3789 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 3790 .ops = &hsw_power_well_ops, 3791 .id = DISP_PW_ID_NONE, 3792 { 3793 .hsw.regs = &icl_aux_power_well_regs, 3794 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3795 }, 3796 }, 3797 { 3798 .name = "AUX C", 3799 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, 3800 .ops = &hsw_power_well_ops, 3801 .id = DISP_PW_ID_NONE, 3802 { 3803 .hsw.regs = &icl_aux_power_well_regs, 3804 
.hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3805 }, 3806 }, 3807 { 3808 .name = "AUX D", 3809 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, 3810 .ops = &hsw_power_well_ops, 3811 .id = DISP_PW_ID_NONE, 3812 { 3813 .hsw.regs = &icl_aux_power_well_regs, 3814 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 3815 }, 3816 }, 3817 { 3818 .name = "power well 4", 3819 .domains = ICL_PW_4_POWER_DOMAINS, 3820 .ops = &hsw_power_well_ops, 3821 .id = DISP_PW_ID_NONE, 3822 { 3823 .hsw.regs = &hsw_power_well_regs, 3824 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 3825 .hsw.has_fuses = true, 3826 .hsw.irq_pipe_mask = BIT(PIPE_C), 3827 }, 3828 }, 3829 }; 3830 3831 static const struct i915_power_well_desc tgl_power_wells[] = { 3832 { 3833 .name = "always-on", 3834 .always_on = true, 3835 .domains = POWER_DOMAIN_MASK, 3836 .ops = &i9xx_always_on_power_well_ops, 3837 .id = DISP_PW_ID_NONE, 3838 }, 3839 { 3840 .name = "power well 1", 3841 /* Handled by the DMC firmware */ 3842 .always_on = true, 3843 .domains = 0, 3844 .ops = &hsw_power_well_ops, 3845 .id = SKL_DISP_PW_1, 3846 { 3847 .hsw.regs = &hsw_power_well_regs, 3848 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3849 .hsw.has_fuses = true, 3850 }, 3851 }, 3852 { 3853 .name = "DC off", 3854 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, 3855 .ops = &gen9_dc_off_power_well_ops, 3856 .id = SKL_DISP_DC_OFF, 3857 }, 3858 { 3859 .name = "power well 2", 3860 .domains = TGL_PW_2_POWER_DOMAINS, 3861 .ops = &hsw_power_well_ops, 3862 .id = SKL_DISP_PW_2, 3863 { 3864 .hsw.regs = &hsw_power_well_regs, 3865 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3866 .hsw.has_fuses = true, 3867 }, 3868 }, 3869 { 3870 .name = "power well 3", 3871 .domains = TGL_PW_3_POWER_DOMAINS, 3872 .ops = &hsw_power_well_ops, 3873 .id = DISP_PW_ID_NONE, 3874 { 3875 .hsw.regs = &hsw_power_well_regs, 3876 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3877 .hsw.irq_pipe_mask = BIT(PIPE_B), 3878 .hsw.has_vga = true, 3879 .hsw.has_fuses = true, 3880 }, 3881 }, 3882 { 3883 .name = "DDI A IO", 3884 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3885 .ops = &hsw_power_well_ops, 3886 .id = DISP_PW_ID_NONE, 3887 { 3888 .hsw.regs = &icl_ddi_power_well_regs, 3889 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3890 } 3891 }, 3892 { 3893 .name = "DDI B IO", 3894 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3895 .ops = &hsw_power_well_ops, 3896 .id = DISP_PW_ID_NONE, 3897 { 3898 .hsw.regs = &icl_ddi_power_well_regs, 3899 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3900 } 3901 }, 3902 { 3903 .name = "DDI C IO", 3904 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3905 .ops = &hsw_power_well_ops, 3906 .id = DISP_PW_ID_NONE, 3907 { 3908 .hsw.regs = &icl_ddi_power_well_regs, 3909 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3910 } 3911 }, 3912 { 3913 .name = "DDI D TC1 IO", 3914 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS, 3915 .ops = &hsw_power_well_ops, 3916 .id = DISP_PW_ID_NONE, 3917 { 3918 .hsw.regs = &icl_ddi_power_well_regs, 3919 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 3920 }, 3921 }, 3922 { 3923 .name = "DDI E TC2 IO", 3924 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS, 3925 .ops = &hsw_power_well_ops, 3926 .id = DISP_PW_ID_NONE, 3927 { 3928 .hsw.regs = &icl_ddi_power_well_regs, 3929 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 3930 }, 3931 }, 3932 { 3933 .name = "DDI F TC3 IO", 3934 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS, 3935 .ops = &hsw_power_well_ops, 3936 .id = DISP_PW_ID_NONE, 3937 { 3938 .hsw.regs = &icl_ddi_power_well_regs, 3939 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 3940 }, 3941 }, 3942 { 3943 .name = "DDI G TC4 IO", 3944 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS, 3945 .ops = &hsw_power_well_ops, 3946 .id = DISP_PW_ID_NONE, 3947 { 3948 .hsw.regs = 
&icl_ddi_power_well_regs, 3949 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 3950 }, 3951 }, 3952 { 3953 .name = "DDI H TC5 IO", 3954 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS, 3955 .ops = &hsw_power_well_ops, 3956 .id = DISP_PW_ID_NONE, 3957 { 3958 .hsw.regs = &icl_ddi_power_well_regs, 3959 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, 3960 }, 3961 }, 3962 { 3963 .name = "DDI I TC6 IO", 3964 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS, 3965 .ops = &hsw_power_well_ops, 3966 .id = DISP_PW_ID_NONE, 3967 { 3968 .hsw.regs = &icl_ddi_power_well_regs, 3969 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, 3970 }, 3971 }, 3972 { 3973 .name = "AUX A", 3974 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 3975 .ops = &hsw_power_well_ops, 3976 .id = DISP_PW_ID_NONE, 3977 { 3978 .hsw.regs = &icl_aux_power_well_regs, 3979 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3980 }, 3981 }, 3982 { 3983 .name = "AUX B", 3984 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 3985 .ops = &hsw_power_well_ops, 3986 .id = DISP_PW_ID_NONE, 3987 { 3988 .hsw.regs = &icl_aux_power_well_regs, 3989 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3990 }, 3991 }, 3992 { 3993 .name = "AUX C", 3994 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 3995 .ops = &hsw_power_well_ops, 3996 .id = DISP_PW_ID_NONE, 3997 { 3998 .hsw.regs = &icl_aux_power_well_regs, 3999 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 4000 }, 4001 }, 4002 { 4003 .name = "AUX D TC1", 4004 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, 4005 .ops = &icl_tc_phy_aux_power_well_ops, 4006 .id = DISP_PW_ID_NONE, 4007 { 4008 .hsw.regs = &icl_aux_power_well_regs, 4009 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4010 .hsw.is_tc_tbt = false, 4011 }, 4012 }, 4013 { 4014 .name = "AUX E TC2", 4015 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, 4016 .ops = &icl_tc_phy_aux_power_well_ops, 4017 .id = DISP_PW_ID_NONE, 4018 { 4019 .hsw.regs = &icl_aux_power_well_regs, 4020 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4021 .hsw.is_tc_tbt = false, 4022 }, 4023 }, 4024 { 4025 .name = "AUX F TC3", 4026 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS, 4027 .ops = &icl_tc_phy_aux_power_well_ops, 4028 .id = DISP_PW_ID_NONE, 4029 { 4030 .hsw.regs = &icl_aux_power_well_regs, 4031 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 4032 .hsw.is_tc_tbt = false, 4033 }, 4034 }, 4035 { 4036 .name = "AUX G TC4", 4037 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS, 4038 .ops = &icl_tc_phy_aux_power_well_ops, 4039 .id = DISP_PW_ID_NONE, 4040 { 4041 .hsw.regs = &icl_aux_power_well_regs, 4042 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 4043 .hsw.is_tc_tbt = false, 4044 }, 4045 }, 4046 { 4047 .name = "AUX H TC5", 4048 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS, 4049 .ops = &icl_tc_phy_aux_power_well_ops, 4050 .id = DISP_PW_ID_NONE, 4051 { 4052 .hsw.regs = &icl_aux_power_well_regs, 4053 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, 4054 .hsw.is_tc_tbt = false, 4055 }, 4056 }, 4057 { 4058 .name = "AUX I TC6", 4059 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS, 4060 .ops = &icl_tc_phy_aux_power_well_ops, 4061 .id = DISP_PW_ID_NONE, 4062 { 4063 .hsw.regs = &icl_aux_power_well_regs, 4064 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, 4065 .hsw.is_tc_tbt = false, 4066 }, 4067 }, 4068 { 4069 .name = "AUX D TBT1", 4070 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS, 4071 .ops = &hsw_power_well_ops, 4072 .id = DISP_PW_ID_NONE, 4073 { 4074 .hsw.regs = &icl_aux_power_well_regs, 4075 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 4076 .hsw.is_tc_tbt = true, 4077 }, 4078 }, 4079 { 4080 .name = "AUX E TBT2", 4081 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS, 4082 .ops = &hsw_power_well_ops, 4083 .id = DISP_PW_ID_NONE, 4084 { 4085 .hsw.regs = &icl_aux_power_well_regs, 4086 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 4087 
.hsw.is_tc_tbt = true, 4088 }, 4089 }, 4090 { 4091 .name = "AUX F TBT3", 4092 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS, 4093 .ops = &hsw_power_well_ops, 4094 .id = DISP_PW_ID_NONE, 4095 { 4096 .hsw.regs = &icl_aux_power_well_regs, 4097 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 4098 .hsw.is_tc_tbt = true, 4099 }, 4100 }, 4101 { 4102 .name = "AUX G TBT4", 4103 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS, 4104 .ops = &hsw_power_well_ops, 4105 .id = DISP_PW_ID_NONE, 4106 { 4107 .hsw.regs = &icl_aux_power_well_regs, 4108 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 4109 .hsw.is_tc_tbt = true, 4110 }, 4111 }, 4112 { 4113 .name = "AUX H TBT5", 4114 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS, 4115 .ops = &hsw_power_well_ops, 4116 .id = DISP_PW_ID_NONE, 4117 { 4118 .hsw.regs = &icl_aux_power_well_regs, 4119 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, 4120 .hsw.is_tc_tbt = true, 4121 }, 4122 }, 4123 { 4124 .name = "AUX I TBT6", 4125 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS, 4126 .ops = &hsw_power_well_ops, 4127 .id = DISP_PW_ID_NONE, 4128 { 4129 .hsw.regs = &icl_aux_power_well_regs, 4130 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, 4131 .hsw.is_tc_tbt = true, 4132 }, 4133 }, 4134 { 4135 .name = "power well 4", 4136 .domains = TGL_PW_4_POWER_DOMAINS, 4137 .ops = &hsw_power_well_ops, 4138 .id = DISP_PW_ID_NONE, 4139 { 4140 .hsw.regs = &hsw_power_well_regs, 4141 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4142 .hsw.has_fuses = true, 4143 .hsw.irq_pipe_mask = BIT(PIPE_C), 4144 } 4145 }, 4146 { 4147 .name = "power well 5", 4148 .domains = TGL_PW_5_POWER_DOMAINS, 4149 .ops = &hsw_power_well_ops, 4150 .id = DISP_PW_ID_NONE, 4151 { 4152 .hsw.regs = &hsw_power_well_regs, 4153 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4154 .hsw.has_fuses = true, 4155 .hsw.irq_pipe_mask = BIT(PIPE_D), 4156 }, 4157 }, 4158 }; 4159 4160 static int 4161 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 4162 int disable_power_well) 4163 { 4164 if (disable_power_well >= 0) 4165 return !!disable_power_well; 4166 4167 return 1; 4168 } 4169 4170 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, 4171 int enable_dc) 4172 { 4173 u32 mask; 4174 int requested_dc; 4175 int max_dc; 4176 4177 if (INTEL_GEN(dev_priv) >= 12) { 4178 max_dc = 4; 4179 /* 4180 * DC9 has a separate HW flow from the rest of the DC states, 4181 * not depending on the DMC firmware. It's needed by system 4182 * suspend/resume, so allow it unconditionally. 
4183 */ 4184 mask = DC_STATE_EN_DC9; 4185 } else if (IS_GEN(dev_priv, 11)) { 4186 max_dc = 2; 4187 mask = DC_STATE_EN_DC9; 4188 } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) { 4189 max_dc = 2; 4190 mask = 0; 4191 } else if (IS_GEN9_LP(dev_priv)) { 4192 max_dc = 1; 4193 mask = DC_STATE_EN_DC9; 4194 } else { 4195 max_dc = 0; 4196 mask = 0; 4197 } 4198 4199 if (!i915_modparams.disable_power_well) 4200 max_dc = 0; 4201 4202 if (enable_dc >= 0 && enable_dc <= max_dc) { 4203 requested_dc = enable_dc; 4204 } else if (enable_dc == -1) { 4205 requested_dc = max_dc; 4206 } else if (enable_dc > max_dc && enable_dc <= 4) { 4207 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n", 4208 enable_dc, max_dc); 4209 requested_dc = max_dc; 4210 } else { 4211 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc); 4212 requested_dc = max_dc; 4213 } 4214 4215 switch (requested_dc) { 4216 case 4: 4217 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; 4218 break; 4219 case 3: 4220 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; 4221 break; 4222 case 2: 4223 mask |= DC_STATE_EN_UPTO_DC6; 4224 break; 4225 case 1: 4226 mask |= DC_STATE_EN_UPTO_DC5; 4227 break; 4228 } 4229 4230 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask); 4231 4232 return mask; 4233 } 4234 4235 static int 4236 __set_power_wells(struct i915_power_domains *power_domains, 4237 const struct i915_power_well_desc *power_well_descs, 4238 int power_well_count) 4239 { 4240 u64 power_well_ids = 0; 4241 int i; 4242 4243 power_domains->power_well_count = power_well_count; 4244 power_domains->power_wells = 4245 kcalloc(power_well_count, 4246 sizeof(*power_domains->power_wells), 4247 GFP_KERNEL); 4248 if (!power_domains->power_wells) 4249 return -ENOMEM; 4250 4251 for (i = 0; i < power_well_count; i++) { 4252 enum i915_power_well_id id = power_well_descs[i].id; 4253 4254 power_domains->power_wells[i].desc = &power_well_descs[i]; 4255 4256 if (id == DISP_PW_ID_NONE) 4257 continue; 4258 4259 WARN_ON(id >= sizeof(power_well_ids) * 8); 4260 WARN_ON(power_well_ids & BIT_ULL(id)); 4261 power_well_ids |= BIT_ULL(id); 4262 } 4263 4264 return 0; 4265 } 4266 4267 #define set_power_wells(power_domains, __power_well_descs) \ 4268 __set_power_wells(power_domains, __power_well_descs, \ 4269 ARRAY_SIZE(__power_well_descs)) 4270 4271 /** 4272 * intel_power_domains_init - initializes the power domain structures 4273 * @dev_priv: i915 device instance 4274 * 4275 * Initializes the power domain structures for @dev_priv depending upon the 4276 * supported platform. 4277 */ 4278 int intel_power_domains_init(struct drm_i915_private *dev_priv) 4279 { 4280 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4281 int err; 4282 4283 i915_modparams.disable_power_well = 4284 sanitize_disable_power_well_option(dev_priv, 4285 i915_modparams.disable_power_well); 4286 dev_priv->csr.allowed_dc_mask = 4287 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc); 4288 4289 dev_priv->csr.target_dc_state = 4290 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 4291 4292 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); 4293 4294 mutex_init(&power_domains->lock); 4295 4296 INIT_DELAYED_WORK(&power_domains->async_put_work, 4297 intel_display_power_put_async_work); 4298 4299 /* 4300 * The enabling order will be from lower to higher indexed wells, 4301 * the disabling order is reversed. 
4302 */ 4303 if (IS_GEN(dev_priv, 12)) { 4304 err = set_power_wells(power_domains, tgl_power_wells); 4305 } else if (IS_ELKHARTLAKE(dev_priv)) { 4306 err = set_power_wells(power_domains, ehl_power_wells); 4307 } else if (IS_GEN(dev_priv, 11)) { 4308 err = set_power_wells(power_domains, icl_power_wells); 4309 } else if (IS_CANNONLAKE(dev_priv)) { 4310 err = set_power_wells(power_domains, cnl_power_wells); 4311 4312 /* 4313 * DDI and Aux IO are getting enabled for all ports 4314 * regardless of their presence or use. So, in order to avoid 4315 * timeouts, let's remove them from the list 4316 * for the SKUs without port F. 4317 */ 4318 if (!IS_CNL_WITH_PORT_F(dev_priv)) 4319 power_domains->power_well_count -= 2; 4320 } else if (IS_GEMINILAKE(dev_priv)) { 4321 err = set_power_wells(power_domains, glk_power_wells); 4322 } else if (IS_BROXTON(dev_priv)) { 4323 err = set_power_wells(power_domains, bxt_power_wells); 4324 } else if (IS_GEN9_BC(dev_priv)) { 4325 err = set_power_wells(power_domains, skl_power_wells); 4326 } else if (IS_CHERRYVIEW(dev_priv)) { 4327 err = set_power_wells(power_domains, chv_power_wells); 4328 } else if (IS_BROADWELL(dev_priv)) { 4329 err = set_power_wells(power_domains, bdw_power_wells); 4330 } else if (IS_HASWELL(dev_priv)) { 4331 err = set_power_wells(power_domains, hsw_power_wells); 4332 } else if (IS_VALLEYVIEW(dev_priv)) { 4333 err = set_power_wells(power_domains, vlv_power_wells); 4334 } else if (IS_I830(dev_priv)) { 4335 err = set_power_wells(power_domains, i830_power_wells); 4336 } else { 4337 err = set_power_wells(power_domains, i9xx_always_on_power_well); 4338 } 4339 4340 return err; 4341 } 4342 4343 /** 4344 * intel_power_domains_cleanup - clean up power domains resources 4345 * @dev_priv: i915 device instance 4346 * 4347 * Release any resources acquired by intel_power_domains_init(). 4348 */ 4349 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) 4350 { 4351 kfree(dev_priv->power_domains.power_wells); 4352 } 4353 4354 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) 4355 { 4356 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4357 struct i915_power_well *power_well; 4358 4359 mutex_lock(&power_domains->lock); 4360 for_each_power_well(dev_priv, power_well) { 4361 power_well->desc->ops->sync_hw(dev_priv, power_well); 4362 power_well->hw_enabled = 4363 power_well->desc->ops->is_enabled(dev_priv, power_well); 4364 } 4365 mutex_unlock(&power_domains->lock); 4366 } 4367 4368 static inline 4369 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv, 4370 i915_reg_t reg, bool enable) 4371 { 4372 u32 val, status; 4373 4374 val = I915_READ(reg); 4375 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST); 4376 I915_WRITE(reg, val); 4377 POSTING_READ(reg); 4378 udelay(10); 4379 4380 status = I915_READ(reg) & DBUF_POWER_STATE; 4381 if ((enable && !status) || (!enable && status)) { 4382 DRM_ERROR("DBus power %s timeout!\n", 4383 enable ?
"enable" : "disable"); 4384 return false; 4385 } 4386 return true; 4387 } 4388 4389 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) 4390 { 4391 intel_dbuf_slice_set(dev_priv, DBUF_CTL, true); 4392 } 4393 4394 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) 4395 { 4396 intel_dbuf_slice_set(dev_priv, DBUF_CTL, false); 4397 } 4398 4399 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv) 4400 { 4401 if (INTEL_GEN(dev_priv) < 11) 4402 return 1; 4403 return 2; 4404 } 4405 4406 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, 4407 u8 req_slices) 4408 { 4409 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 4410 bool ret; 4411 4412 if (req_slices > intel_dbuf_max_slices(dev_priv)) { 4413 DRM_ERROR("Invalid number of dbuf slices requested\n"); 4414 return; 4415 } 4416 4417 if (req_slices == hw_enabled_slices || req_slices == 0) 4418 return; 4419 4420 if (req_slices > hw_enabled_slices) 4421 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); 4422 else 4423 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false); 4424 4425 if (ret) 4426 dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices; 4427 } 4428 4429 static void icl_dbuf_enable(struct drm_i915_private *dev_priv) 4430 { 4431 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST); 4432 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST); 4433 POSTING_READ(DBUF_CTL_S2); 4434 4435 udelay(10); 4436 4437 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || 4438 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) 4439 DRM_ERROR("DBuf power enable timeout\n"); 4440 else 4441 /* 4442 * FIXME: for now pretend that we only have 1 slice, see 4443 * intel_enabled_dbuf_slices_num(). 4444 */ 4445 dev_priv->wm.skl_hw.ddb.enabled_slices = 1; 4446 } 4447 4448 static void icl_dbuf_disable(struct drm_i915_private *dev_priv) 4449 { 4450 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST); 4451 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST); 4452 POSTING_READ(DBUF_CTL_S2); 4453 4454 udelay(10); 4455 4456 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || 4457 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) 4458 DRM_ERROR("DBuf power disable timeout!\n"); 4459 else 4460 /* 4461 * FIXME: for now pretend that the first slice is always 4462 * enabled, see intel_enabled_dbuf_slices_num(). 4463 */ 4464 dev_priv->wm.skl_hw.ddb.enabled_slices = 1; 4465 } 4466 4467 static void icl_mbus_init(struct drm_i915_private *dev_priv) 4468 { 4469 u32 val; 4470 4471 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 4472 MBUS_ABOX_BT_CREDIT_POOL2(16) | 4473 MBUS_ABOX_B_CREDIT(1) | 4474 MBUS_ABOX_BW_CREDIT(1); 4475 4476 I915_WRITE(MBUS_ABOX_CTL, val); 4477 } 4478 4479 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 4480 { 4481 u32 val = I915_READ(LCPLL_CTL); 4482 4483 /* 4484 * The LCPLL register should be turned on by the BIOS. For now 4485 * let's just check its state and print errors in case 4486 * something is wrong. Don't even try to turn it on. 
4487 */ 4488 4489 if (val & LCPLL_CD_SOURCE_FCLK) 4490 DRM_ERROR("CDCLK source is not LCPLL\n"); 4491 4492 if (val & LCPLL_PLL_DISABLE) 4493 DRM_ERROR("LCPLL is disabled\n"); 4494 4495 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 4496 DRM_ERROR("LCPLL not using non-SSC reference\n"); 4497 } 4498 4499 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 4500 { 4501 struct drm_device *dev = &dev_priv->drm; 4502 struct intel_crtc *crtc; 4503 4504 for_each_intel_crtc(dev, crtc) 4505 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 4506 pipe_name(crtc->pipe)); 4507 4508 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2), 4509 "Display power well on\n"); 4510 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, 4511 "SPLL enabled\n"); 4512 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 4513 "WRPLL1 enabled\n"); 4514 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 4515 "WRPLL2 enabled\n"); 4516 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, 4517 "Panel power on\n"); 4518 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 4519 "CPU PWM1 enabled\n"); 4520 if (IS_HASWELL(dev_priv)) 4521 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 4522 "CPU PWM2 enabled\n"); 4523 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 4524 "PCH PWM1 enabled\n"); 4525 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 4526 "Utility pin enabled\n"); 4527 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, 4528 "PCH GTC enabled\n"); 4529 4530 /* 4531 * In theory we can still leave IRQs enabled, as long as only the HPD 4532 * interrupts remain enabled. We used to check for that, but since it's 4533 * gen-specific and since we only disable LCPLL after we fully disable 4534 * the interrupts, the check below should be enough. 4535 */ 4536 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 4537 } 4538 4539 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 4540 { 4541 if (IS_HASWELL(dev_priv)) 4542 return I915_READ(D_COMP_HSW); 4543 else 4544 return I915_READ(D_COMP_BDW); 4545 } 4546 4547 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 4548 { 4549 if (IS_HASWELL(dev_priv)) { 4550 if (sandybridge_pcode_write(dev_priv, 4551 GEN6_PCODE_WRITE_D_COMP, val)) 4552 DRM_DEBUG_KMS("Failed to write to D_COMP\n"); 4553 } else { 4554 I915_WRITE(D_COMP_BDW, val); 4555 POSTING_READ(D_COMP_BDW); 4556 } 4557 } 4558 4559 /* 4560 * This function implements pieces of two sequences from BSpec: 4561 * - Sequence for display software to disable LCPLL 4562 * - Sequence for display software to allow package C8+ 4563 * The steps implemented here are just the steps that actually touch the LCPLL 4564 * register. Callers should take care of disabling all the display engine 4565 * functions, doing the mode unset, fixing interrupts, etc. 
4566 */ 4567 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 4568 bool switch_to_fclk, bool allow_power_down) 4569 { 4570 u32 val; 4571 4572 assert_can_disable_lcpll(dev_priv); 4573 4574 val = I915_READ(LCPLL_CTL); 4575 4576 if (switch_to_fclk) { 4577 val |= LCPLL_CD_SOURCE_FCLK; 4578 I915_WRITE(LCPLL_CTL, val); 4579 4580 if (wait_for_us(I915_READ(LCPLL_CTL) & 4581 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 4582 DRM_ERROR("Switching to FCLK failed\n"); 4583 4584 val = I915_READ(LCPLL_CTL); 4585 } 4586 4587 val |= LCPLL_PLL_DISABLE; 4588 I915_WRITE(LCPLL_CTL, val); 4589 POSTING_READ(LCPLL_CTL); 4590 4591 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 4592 DRM_ERROR("LCPLL still locked\n"); 4593 4594 val = hsw_read_dcomp(dev_priv); 4595 val |= D_COMP_COMP_DISABLE; 4596 hsw_write_dcomp(dev_priv, val); 4597 ndelay(100); 4598 4599 if (wait_for((hsw_read_dcomp(dev_priv) & 4600 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 4601 DRM_ERROR("D_COMP RCOMP still in progress\n"); 4602 4603 if (allow_power_down) { 4604 val = I915_READ(LCPLL_CTL); 4605 val |= LCPLL_POWER_DOWN_ALLOW; 4606 I915_WRITE(LCPLL_CTL, val); 4607 POSTING_READ(LCPLL_CTL); 4608 } 4609 } 4610 4611 /* 4612 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 4613 * source. 4614 */ 4615 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 4616 { 4617 u32 val; 4618 4619 val = I915_READ(LCPLL_CTL); 4620 4621 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 4622 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 4623 return; 4624 4625 /* 4626 * Make sure we're not in PC8 state before disabling PC8, otherwise 4627 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 4628 */ 4629 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 4630 4631 if (val & LCPLL_POWER_DOWN_ALLOW) { 4632 val &= ~LCPLL_POWER_DOWN_ALLOW; 4633 I915_WRITE(LCPLL_CTL, val); 4634 POSTING_READ(LCPLL_CTL); 4635 } 4636 4637 val = hsw_read_dcomp(dev_priv); 4638 val |= D_COMP_COMP_FORCE; 4639 val &= ~D_COMP_COMP_DISABLE; 4640 hsw_write_dcomp(dev_priv, val); 4641 4642 val = I915_READ(LCPLL_CTL); 4643 val &= ~LCPLL_PLL_DISABLE; 4644 I915_WRITE(LCPLL_CTL, val); 4645 4646 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 4647 DRM_ERROR("LCPLL not locked yet\n"); 4648 4649 if (val & LCPLL_CD_SOURCE_FCLK) { 4650 val = I915_READ(LCPLL_CTL); 4651 val &= ~LCPLL_CD_SOURCE_FCLK; 4652 I915_WRITE(LCPLL_CTL, val); 4653 4654 if (wait_for_us((I915_READ(LCPLL_CTL) & 4655 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 4656 DRM_ERROR("Switching back to LCPLL failed\n"); 4657 } 4658 4659 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 4660 4661 intel_update_cdclk(dev_priv); 4662 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); 4663 } 4664
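/*
 * Illustrative pairing of the two LCPLL helpers above (a sketch of the call
 * sites in this file, not a new sequence): the PC8 entry/exit paths use them
 * as
 *
 *	hsw_disable_lcpll(dev_priv, true, true);	// entry: fclk + allow power down
 *	...
 *	hsw_restore_lcpll(dev_priv);			// exit: relock, back to LCPLL
 *
 * see hsw_enable_pc8() and hsw_disable_pc8() below, which additionally gate
 * SOUTH_DSPCLK_GATE_D and reprogram the PCH reference clocks.
 */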
4665 /* 4666 * Package states C8 and deeper are really deep PC states that can only be 4667 * reached when all the devices on the system allow it, so even if the graphics 4668 * device allows PC8+, it doesn't mean the system will actually get to these 4669 * states. Our driver only allows PC8+ when going into runtime PM. 4670 * 4671 * The requirements for PC8+ are that all the outputs are disabled, the power 4672 * well is disabled and most interrupts are disabled, and these are also 4673 * requirements for runtime PM. When these conditions are met, we manually do 4674 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 4675 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard 4676 * hang the machine. 4677 * 4678 * When we really reach PC8 or deeper states (not just when we allow it) we lose 4679 * the state of some registers, so when we come back from PC8+ we need to 4680 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 4681 * need to take care of the registers kept by RC6. Notice that this happens even 4682 * if we don't put the device in PCI D3 state (which is what currently happens 4683 * because of the runtime PM support). 4684 * 4685 * For more, read "Display Sequences for Package C8" in the hardware 4686 * documentation. 4687 */ 4688 static void hsw_enable_pc8(struct drm_i915_private *dev_priv) 4689 { 4690 u32 val; 4691 4692 DRM_DEBUG_KMS("Enabling package C8+\n"); 4693 4694 if (HAS_PCH_LPT_LP(dev_priv)) { 4695 val = I915_READ(SOUTH_DSPCLK_GATE_D); 4696 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 4697 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 4698 } 4699 4700 lpt_disable_clkout_dp(dev_priv); 4701 hsw_disable_lcpll(dev_priv, true, true); 4702 } 4703 4704 static void hsw_disable_pc8(struct drm_i915_private *dev_priv) 4705 { 4706 u32 val; 4707 4708 DRM_DEBUG_KMS("Disabling package C8+\n"); 4709 4710 hsw_restore_lcpll(dev_priv); 4711 intel_init_pch_refclk(dev_priv); 4712 4713 if (HAS_PCH_LPT_LP(dev_priv)) { 4714 val = I915_READ(SOUTH_DSPCLK_GATE_D); 4715 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 4716 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 4717 } 4718 } 4719 4720 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, 4721 bool enable) 4722 { 4723 i915_reg_t reg; 4724 u32 reset_bits, val; 4725 4726 if (IS_IVYBRIDGE(dev_priv)) { 4727 reg = GEN7_MSG_CTL; 4728 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; 4729 } else { 4730 reg = HSW_NDE_RSTWRN_OPT; 4731 reset_bits = RESET_PCH_HANDSHAKE_ENABLE; 4732 } 4733 4734 val = I915_READ(reg); 4735 4736 if (enable) 4737 val |= reset_bits; 4738 else 4739 val &= ~reset_bits; 4740 4741 I915_WRITE(reg, val); 4742 } 4743 4744 static void skl_display_core_init(struct drm_i915_private *dev_priv, 4745 bool resume) 4746 { 4747 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4748 struct i915_power_well *well; 4749 4750 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 4751 4752 /* enable PCH reset handshake */ 4753 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 4754 4755 /* enable PG1 and Misc I/O */ 4756 mutex_lock(&power_domains->lock); 4757 4758 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4759 intel_power_well_enable(dev_priv, well); 4760 4761 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); 4762 intel_power_well_enable(dev_priv, well); 4763 4764 mutex_unlock(&power_domains->lock); 4765 4766 intel_cdclk_init(dev_priv); 4767 4768 gen9_dbuf_enable(dev_priv); 4769 4770 if (resume && dev_priv->csr.dmc_payload) 4771 intel_csr_load_program(dev_priv); 4772 } 4773
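/*
 * For illustration, the enable half above boils down to the following order
 * (a sketch, assuming a resume with a DMC payload present):
 *
 *	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 *	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 *	... enable PG1 + Misc I/O wells under power_domains->lock ...
 *	intel_cdclk_init(dev_priv);
 *	gen9_dbuf_enable(dev_priv);
 *	intel_csr_load_program(dev_priv);
 *
 * skl_display_core_uninit() below undoes this in roughly reverse order,
 * keeping the Misc I/O well enabled as required by BSpec.
 */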
4774 static void skl_display_core_uninit(struct drm_i915_private *dev_priv) 4775 { 4776 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4777 struct i915_power_well *well; 4778 4779 gen9_disable_dc_states(dev_priv); 4780 4781 gen9_dbuf_disable(dev_priv); 4782 4783 intel_cdclk_uninit(dev_priv); 4784 4785 /* The spec doesn't call for removing the reset handshake flag */ 4786 /* disable PG1 and Misc I/O */ 4787 4788 mutex_lock(&power_domains->lock); 4789 4790 /* 4791 * BSpec says to keep the MISC IO power well enabled here, only 4792 * remove our request for power well 1. 4793 * Note that even though the driver's request is removed, power well 1 4794 * may stay enabled after this due to DMC's own request on it. 4795 */ 4796 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4797 intel_power_well_disable(dev_priv, well); 4798 4799 mutex_unlock(&power_domains->lock); 4800 4801 usleep_range(10, 30); /* 10 us delay per Bspec */ 4802 } 4803 4804 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume) 4805 { 4806 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4807 struct i915_power_well *well; 4808 4809 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 4810 4811 /* 4812 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 4813 * or else the reset will hang because there is no PCH to respond. 4814 * Move the handshake programming to the initialization sequence. 4815 * Previously it was left up to the BIOS. 4816 */ 4817 intel_pch_reset_handshake(dev_priv, false); 4818 4819 /* Enable PG1 */ 4820 mutex_lock(&power_domains->lock); 4821 4822 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4823 intel_power_well_enable(dev_priv, well); 4824 4825 mutex_unlock(&power_domains->lock); 4826 4827 intel_cdclk_init(dev_priv); 4828 4829 gen9_dbuf_enable(dev_priv); 4830 4831 if (resume && dev_priv->csr.dmc_payload) 4832 intel_csr_load_program(dev_priv); 4833 } 4834 4835 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) 4836 { 4837 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4838 struct i915_power_well *well; 4839 4840 gen9_disable_dc_states(dev_priv); 4841 4842 gen9_dbuf_disable(dev_priv); 4843 4844 intel_cdclk_uninit(dev_priv); 4845 4846 /* The spec doesn't call for removing the reset handshake flag */ 4847 4848 /* 4849 * Disable PW1 (PG1). 4850 * Note that even though the driver's request is removed, power well 1 4851 * may stay enabled after this due to DMC's own request on it. 4852 */ 4853 mutex_lock(&power_domains->lock); 4854 4855 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4856 intel_power_well_disable(dev_priv, well); 4857 4858 mutex_unlock(&power_domains->lock); 4859 4860 usleep_range(10, 30); /* 10 us delay per Bspec */ 4861 } 4862 4863 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) 4864 { 4865 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4866 struct i915_power_well *well; 4867 4868 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 4869 4870 /* 1. Enable PCH Reset Handshake */ 4871 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 4872 4873 /* 2-3. */ 4874 intel_combo_phy_init(dev_priv); 4875 4876 /* 4877 * 4. Enable Power Well 1 (PG1). 4878 * The AUX IO power wells will be enabled on demand. 4879 */ 4880 mutex_lock(&power_domains->lock); 4881 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4882 intel_power_well_enable(dev_priv, well); 4883 mutex_unlock(&power_domains->lock); 4884 4885 /* 5. Enable CD clock */ 4886 intel_cdclk_init(dev_priv); 4887 4888 /* 6. Enable DBUF */ 4889 gen9_dbuf_enable(dev_priv); 4890 4891 if (resume && dev_priv->csr.dmc_payload) 4892 intel_csr_load_program(dev_priv); 4893 } 4894 4895 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) 4896 { 4897 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4898 struct i915_power_well *well; 4899 4900 gen9_disable_dc_states(dev_priv); 4901 4902 /* 1. Disable all display engine functions -> already done */ 4903 4904 /* 2. Disable DBUF */ 4905 gen9_dbuf_disable(dev_priv); 4906 4907 /* 3.
Disable CD clock */ 4908 intel_cdclk_uninit(dev_priv); 4909 4910 /* 4911 * 4. Disable Power Well 1 (PG1). 4912 * The AUX IO power wells are toggled on demand, so they are already 4913 * disabled at this point. 4914 */ 4915 mutex_lock(&power_domains->lock); 4916 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4917 intel_power_well_disable(dev_priv, well); 4918 mutex_unlock(&power_domains->lock); 4919 4920 usleep_range(10, 30); /* 10 us delay per Bspec */ 4921 4922 /* 5. */ 4923 intel_combo_phy_uninit(dev_priv); 4924 } 4925 4926 struct buddy_page_mask { 4927 u32 page_mask; 4928 u8 type; 4929 u8 num_channels; 4930 }; 4931 4932 static const struct buddy_page_mask tgl_buddy_page_masks[] = { 4933 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE }, 4934 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF }, 4935 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C }, 4936 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F }, 4937 {} 4938 }; 4939 4940 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = { 4941 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 }, 4942 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 }, 4943 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 }, 4944 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 }, 4945 {} 4946 }; 4947 4948 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) 4949 { 4950 enum intel_dram_type type = dev_priv->dram_info.type; 4951 u8 num_channels = dev_priv->dram_info.num_channels; 4952 const struct buddy_page_mask *table; 4953 int i; 4954 4955 if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0)) 4956 /* Wa_1409767108: tgl */ 4957 table = wa_1409767108_buddy_page_masks; 4958 else 4959 table = tgl_buddy_page_masks; 4960 4961 for (i = 0; table[i].page_mask != 0; i++) 4962 if (table[i].num_channels == num_channels && 4963 table[i].type == type) 4964 break; 4965 4966 if (table[i].page_mask == 0) { 4967 DRM_DEBUG_DRIVER("Unknown memory configuration; disabling address buddy logic.\n"); 4968 I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE); 4969 I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE); 4970 } else { 4971 I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask); 4972 I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask); 4973 } 4974 } 4975 4976 static void icl_display_core_init(struct drm_i915_private *dev_priv, 4977 bool resume) 4978 { 4979 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4980 struct i915_power_well *well; 4981 4982 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 4983 4984 /* 1. Enable PCH reset handshake. */ 4985 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 4986 4987 /* 2. Initialize all combo phys */ 4988 intel_combo_phy_init(dev_priv); 4989 4990 /* 4991 * 3. Enable Power Well 1 (PG1). 4992 * The AUX IO power wells will be enabled on demand. 4993 */ 4994 mutex_lock(&power_domains->lock); 4995 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4996 intel_power_well_enable(dev_priv, well); 4997 mutex_unlock(&power_domains->lock); 4998 4999 /* 4. Enable CDCLK. */ 5000 intel_cdclk_init(dev_priv); 5001 5002 /* 5. Enable DBUF. */ 5003 icl_dbuf_enable(dev_priv); 5004 5005 /* 6. Setup MBUS. */ 5006 icl_mbus_init(dev_priv); 5007 5008 /* 7. 
Program arbiter BW_BUDDY registers */ 5009 if (INTEL_GEN(dev_priv) >= 12) 5010 tgl_bw_buddy_init(dev_priv); 5011 5012 if (resume && dev_priv->csr.dmc_payload) 5013 intel_csr_load_program(dev_priv); 5014 } 5015 5016 static void icl_display_core_uninit(struct drm_i915_private *dev_priv) 5017 { 5018 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5019 struct i915_power_well *well; 5020 5021 gen9_disable_dc_states(dev_priv); 5022 5023 /* 1. Disable all display engine functions -> already done */ 5024 5025 /* 2. Disable DBUF */ 5026 icl_dbuf_disable(dev_priv); 5027 5028 /* 3. Disable CD clock */ 5029 intel_cdclk_uninit(dev_priv); 5030 5031 /* 5032 * 4. Disable Power Well 1 (PG1). 5033 * The AUX IO power wells are toggled on demand, so they are already 5034 * disabled at this point. 5035 */ 5036 mutex_lock(&power_domains->lock); 5037 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5038 intel_power_well_disable(dev_priv, well); 5039 mutex_unlock(&power_domains->lock); 5040 5041 /* 5. */ 5042 intel_combo_phy_uninit(dev_priv); 5043 } 5044
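/*
 * Usage sketch for the two gen11+ sequences above: the system suspend
 * helpers at the end of this file pair them roughly as
 *
 *	icl_display_core_uninit(i915);		// suspend, then allow DC9
 *	...
 *	icl_display_core_init(i915, true);	// resume, reloads the DMC payload
 *
 * see intel_display_power_suspend() and intel_display_power_resume(), which
 * additionally toggle DC9 around these calls.
 */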
5045 static void chv_phy_control_init(struct drm_i915_private *dev_priv) 5046 { 5047 struct i915_power_well *cmn_bc = 5048 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 5049 struct i915_power_well *cmn_d = 5050 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 5051 5052 /* 5053 * DISPLAY_PHY_CONTROL can get corrupted if read. As a 5054 * workaround never ever read DISPLAY_PHY_CONTROL, and 5055 * instead maintain a shadow copy ourselves. Use the actual 5056 * power well state and lane status to reconstruct the 5057 * expected initial value. 5058 */ 5059 dev_priv->chv_phy_control = 5060 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | 5061 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | 5062 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | 5063 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | 5064 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); 5065 5066 /* 5067 * If all lanes are disabled we leave the override disabled 5068 * with all power down bits cleared to match the state we 5069 * would use after disabling the port. Otherwise enable the 5070 * override and set the lane powerdown bits according to the 5071 * current lane status. 5072 */ 5073 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { 5074 u32 status = I915_READ(DPLL(PIPE_A)); 5075 unsigned int mask; 5076 5077 mask = status & DPLL_PORTB_READY_MASK; 5078 if (mask == 0xf) 5079 mask = 0x0; 5080 else 5081 dev_priv->chv_phy_control |= 5082 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); 5083 5084 dev_priv->chv_phy_control |= 5085 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); 5086 5087 mask = (status & DPLL_PORTC_READY_MASK) >> 4; 5088 if (mask == 0xf) 5089 mask = 0x0; 5090 else 5091 dev_priv->chv_phy_control |= 5092 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); 5093 5094 dev_priv->chv_phy_control |= 5095 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); 5096 5097 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); 5098 5099 dev_priv->chv_phy_assert[DPIO_PHY0] = false; 5100 } else { 5101 dev_priv->chv_phy_assert[DPIO_PHY0] = true; 5102 } 5103 5104 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { 5105 u32 status = I915_READ(DPIO_PHY_STATUS); 5106 unsigned int mask; 5107 5108 mask = status & DPLL_PORTD_READY_MASK; 5109 5110 if (mask == 0xf) 5111 mask = 0x0; 5112 else 5113 dev_priv->chv_phy_control |= 5114 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); 5115 5116 dev_priv->chv_phy_control |= 5117 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); 5118 5119 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); 5120 5121 dev_priv->chv_phy_assert[DPIO_PHY1] = false; 5122 } else { 5123 dev_priv->chv_phy_assert[DPIO_PHY1] = true; 5124 } 5125 5126 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); 5127 5128 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n", 5129 dev_priv->chv_phy_control); 5130 } 5131 5132 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) 5133 { 5134 struct i915_power_well *cmn = 5135 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 5136 struct i915_power_well *disp2d = 5137 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); 5138 5139 /* If the display might be already active, skip this */ 5140 if (cmn->desc->ops->is_enabled(dev_priv, cmn) && 5141 disp2d->desc->ops->is_enabled(dev_priv, disp2d) && 5142 I915_READ(DPIO_CTL) & DPIO_CMNRST) 5143 return; 5144 5145 DRM_DEBUG_KMS("toggling display PHY side reset\n"); 5146 5147 /* cmnlane needs DPLL registers */ 5148 disp2d->desc->ops->enable(dev_priv, disp2d); 5149 5150 /* 5151 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: 5152 * Need to assert and de-assert PHY SB reset by gating the 5153 * common lane power, then un-gating it. 5154 * Simply ungating isn't enough to reset the PHY enough to get 5155 * ports and lanes running.
5156 */ 5157 cmn->desc->ops->disable(dev_priv, cmn); 5158 } 5159 5160 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) 5161 { 5162 bool ret; 5163 5164 vlv_punit_get(dev_priv); 5165 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; 5166 vlv_punit_put(dev_priv); 5167 5168 return ret; 5169 } 5170 5171 static void assert_ved_power_gated(struct drm_i915_private *dev_priv) 5172 { 5173 WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), 5174 "VED not power gated\n"); 5175 } 5176 5177 static void assert_isp_power_gated(struct drm_i915_private *dev_priv) 5178 { 5179 static const struct pci_device_id isp_ids[] = { 5180 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, 5181 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, 5182 {} 5183 }; 5184 5185 WARN(!pci_dev_present(isp_ids) && 5186 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), 5187 "ISP not power gated\n"); 5188 } 5189 5190 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); 5191
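/*
 * Bird's-eye view of how the entry points below are ordered on a normal
 * driver load (an illustrative sketch, error handling omitted):
 *
 *	intel_power_domains_init(i915);
 *	intel_power_domains_init_hw(i915, false);
 *	... display HW state readout / sanitization ...
 *	intel_power_domains_enable(i915);
 *
 * with intel_power_domains_driver_remove() as the unload counterpart.
 */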
5192 /** 5193 * intel_power_domains_init_hw - initialize hardware power domain state 5194 * @i915: i915 device instance 5195 * @resume: Called from resume code paths or not 5196 * 5197 * This function initializes the hardware power domain state and enables all 5198 * power wells belonging to the INIT power domain. Power wells in other 5199 * domains (and not in the INIT domain) are referenced or disabled by 5200 * intel_modeset_readout_hw_state(). After that the reference count of each 5201 * power well must match its HW enabled state, see 5202 * intel_power_domains_verify_state(). 5203 * 5204 * It will return with power domains disabled (to be enabled later by 5205 * intel_power_domains_enable()) and must be paired with 5206 * intel_power_domains_driver_remove(). 5207 */ 5208 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) 5209 { 5210 struct i915_power_domains *power_domains = &i915->power_domains; 5211 5212 power_domains->initializing = true; 5213 5214 /* Must happen before power domain init on VLV/CHV */ 5215 intel_update_rawclk(i915); 5216 5217 if (INTEL_GEN(i915) >= 11) { 5218 icl_display_core_init(i915, resume); 5219 } else if (IS_CANNONLAKE(i915)) { 5220 cnl_display_core_init(i915, resume); 5221 } else if (IS_GEN9_BC(i915)) { 5222 skl_display_core_init(i915, resume); 5223 } else if (IS_GEN9_LP(i915)) { 5224 bxt_display_core_init(i915, resume); 5225 } else if (IS_CHERRYVIEW(i915)) { 5226 mutex_lock(&power_domains->lock); 5227 chv_phy_control_init(i915); 5228 mutex_unlock(&power_domains->lock); 5229 assert_isp_power_gated(i915); 5230 } else if (IS_VALLEYVIEW(i915)) { 5231 mutex_lock(&power_domains->lock); 5232 vlv_cmnlane_wa(i915); 5233 mutex_unlock(&power_domains->lock); 5234 assert_ved_power_gated(i915); 5235 assert_isp_power_gated(i915); 5236 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) { 5237 hsw_assert_cdclk(i915); 5238 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 5239 } else if (IS_IVYBRIDGE(i915)) { 5240 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 5241 } 5242 5243 /* 5244 * Keep all power wells enabled for any dependent HW access during 5245 * initialization and to make sure we keep BIOS enabled display HW 5246 * resources powered until display HW readout is complete. We drop 5247 * this reference in intel_power_domains_enable(). 5248 */ 5249 power_domains->wakeref = 5250 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5251 5252 /* Disable power well support if the user asked for it. */ 5253 if (!i915_modparams.disable_power_well) 5254 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5255 intel_power_domains_sync_hw(i915); 5256 5257 power_domains->initializing = false; 5258 } 5259 5260 /** 5261 * intel_power_domains_driver_remove - deinitialize hw power domain state 5262 * @i915: i915 device instance 5263 * 5264 * De-initializes the display power domain HW state. It also ensures that the 5265 * device stays powered up so that the driver can be reloaded. 5266 * 5267 * It must be called with power domains already disabled (after a call to 5268 * intel_power_domains_disable()) and must be paired with 5269 * intel_power_domains_init_hw(). 5270 */ 5271 void intel_power_domains_driver_remove(struct drm_i915_private *i915) 5272 { 5273 intel_wakeref_t wakeref __maybe_unused = 5274 fetch_and_zero(&i915->power_domains.wakeref); 5275 5276 /* Remove the refcount we took to keep power well support disabled. */ 5277 if (!i915_modparams.disable_power_well) 5278 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); 5279 5280 intel_display_power_flush_work_sync(i915); 5281 5282 intel_power_domains_verify_state(i915); 5283 5284 /* Keep the power well enabled, but cancel its rpm wakeref. */ 5285 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 5286 } 5287 5288 /** 5289 * intel_power_domains_enable - enable toggling of display power wells 5290 * @i915: i915 device instance 5291 * 5292 * Enable the on-demand enabling/disabling of the display power wells. Note that 5293 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled 5294 * only at specific points of the display modeset sequence, thus they are not 5295 * affected by the intel_power_domains_enable()/disable() calls. The purpose 5296 * of these functions is to keep the rest of the power wells enabled until the end 5297 * of display HW readout (which will acquire the power references reflecting 5298 * the current HW state). 5299 */ 5300 void intel_power_domains_enable(struct drm_i915_private *i915) 5301 { 5302 intel_wakeref_t wakeref __maybe_unused = 5303 fetch_and_zero(&i915->power_domains.wakeref); 5304 5305 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 5306 intel_power_domains_verify_state(i915); 5307 } 5308 5309 /** 5310 * intel_power_domains_disable - disable toggling of display power wells 5311 * @i915: i915 device instance 5312 * 5313 * Disable the on-demand enabling/disabling of the display power wells. See 5314 * intel_power_domains_enable() for which power wells this call controls. 5315 */ 5316 void intel_power_domains_disable(struct drm_i915_private *i915) 5317 { 5318 struct i915_power_domains *power_domains = &i915->power_domains; 5319 5320 WARN_ON(power_domains->wakeref); 5321 power_domains->wakeref = 5322 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5323 5324 intel_power_domains_verify_state(i915); 5325 } 5326
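/*
 * System suspend bracketing, roughly (an illustrative sketch; the
 * I915_DRM_SUSPEND_MEM mode name is assumed from the suspend_mode
 * documentation below, the actual caller picks the mode matching the target
 * system state):
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *	...
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);
 */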
5327 /** 5328 * intel_power_domains_suspend - suspend power domain state 5329 * @i915: i915 device instance 5330 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) 5331 * 5332 * This function prepares the hardware power domain state before entering 5333 * system suspend. 5334 * 5335 * It must be called with power domains already disabled (after a call to 5336 * intel_power_domains_disable()) and paired with intel_power_domains_resume(). 5337 */ 5338 void intel_power_domains_suspend(struct drm_i915_private *i915, 5339 enum i915_drm_suspend_mode suspend_mode) 5340 { 5341 struct i915_power_domains *power_domains = &i915->power_domains; 5342 intel_wakeref_t wakeref __maybe_unused = 5343 fetch_and_zero(&power_domains->wakeref); 5344 5345 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 5346 5347 /* 5348 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 5349 * support, don't manually deinit the power domains. This also means the 5350 * CSR/DMC firmware will stay active; it will power down any HW 5351 * resources as required and also enable deeper system power states 5352 * that would be blocked if the firmware was inactive. 5353 */ 5354 if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) && 5355 suspend_mode == I915_DRM_SUSPEND_IDLE && 5356 i915->csr.dmc_payload) { 5357 intel_display_power_flush_work(i915); 5358 intel_power_domains_verify_state(i915); 5359 return; 5360 } 5361 5362 /* 5363 * Even if power well support was disabled we still want to disable 5364 * power wells if power domains must be deinitialized for suspend. 5365 */ 5366 if (!i915_modparams.disable_power_well) 5367 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); 5368 5369 intel_display_power_flush_work(i915); 5370 intel_power_domains_verify_state(i915); 5371 5372 if (INTEL_GEN(i915) >= 11) 5373 icl_display_core_uninit(i915); 5374 else if (IS_CANNONLAKE(i915)) 5375 cnl_display_core_uninit(i915); 5376 else if (IS_GEN9_BC(i915)) 5377 skl_display_core_uninit(i915); 5378 else if (IS_GEN9_LP(i915)) 5379 bxt_display_core_uninit(i915); 5380 5381 power_domains->display_core_suspended = true; 5382 } 5383 5384 /** 5385 * intel_power_domains_resume - resume power domain state 5386 * @i915: i915 device instance 5387 * 5388 * This function resumes the hardware power domain state during system resume. 5389 * 5390 * It will return with power domain support disabled (to be enabled later by 5391 * intel_power_domains_enable()) and must be paired with 5392 * intel_power_domains_suspend(). 5393 */ 5394 void intel_power_domains_resume(struct drm_i915_private *i915) 5395 { 5396 struct i915_power_domains *power_domains = &i915->power_domains; 5397 5398 if (power_domains->display_core_suspended) { 5399 intel_power_domains_init_hw(i915, true); 5400 power_domains->display_core_suspended = false; 5401 } else { 5402 WARN_ON(power_domains->wakeref); 5403 power_domains->wakeref = 5404 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5405 } 5406 5407 intel_power_domains_verify_state(i915); 5408 } 5409 5410 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 5411 5412 static void intel_power_domains_dump_info(struct drm_i915_private *i915) 5413 { 5414 struct i915_power_domains *power_domains = &i915->power_domains; 5415 struct i915_power_well *power_well; 5416 5417 for_each_power_well(i915, power_well) { 5418 enum intel_display_power_domain domain; 5419 5420 DRM_DEBUG_DRIVER("%-25s %d\n", 5421 power_well->desc->name, power_well->count); 5422 5423 for_each_power_domain(domain, power_well->desc->domains) 5424 DRM_DEBUG_DRIVER(" %-23s %d\n", 5425 intel_display_power_domain_str(domain), 5426 power_domains->domain_use_count[domain]); 5427 } 5428 } 5429 5430 /** 5431 * intel_power_domains_verify_state - verify the HW/SW state for all power wells 5432 * @i915: i915 device instance 5433 * 5434 * Verify if the reference count of each power well matches its HW enabled 5435 * state and the total refcount of the domains it belongs to. This must be
This must be 5436 * called after modeset HW state sanitization, which is responsible for 5437 * acquiring reference counts for any power wells in use and disabling the 5438 * ones left on by BIOS but not required by any active output. 5439 */ 5440 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 5441 { 5442 struct i915_power_domains *power_domains = &i915->power_domains; 5443 struct i915_power_well *power_well; 5444 bool dump_domain_info; 5445 5446 mutex_lock(&power_domains->lock); 5447 5448 verify_async_put_domains_state(power_domains); 5449 5450 dump_domain_info = false; 5451 for_each_power_well(i915, power_well) { 5452 enum intel_display_power_domain domain; 5453 int domains_count; 5454 bool enabled; 5455 5456 enabled = power_well->desc->ops->is_enabled(i915, power_well); 5457 if ((power_well->count || power_well->desc->always_on) != 5458 enabled) 5459 DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)", 5460 power_well->desc->name, 5461 power_well->count, enabled); 5462 5463 domains_count = 0; 5464 for_each_power_domain(domain, power_well->desc->domains) 5465 domains_count += power_domains->domain_use_count[domain]; 5466 5467 if (power_well->count != domains_count) { 5468 DRM_ERROR("power well %s refcount/domain refcount mismatch " 5469 "(refcount %d/domains refcount %d)\n", 5470 power_well->desc->name, power_well->count, 5471 domains_count); 5472 dump_domain_info = true; 5473 } 5474 } 5475 5476 if (dump_domain_info) { 5477 static bool dumped; 5478 5479 if (!dumped) { 5480 intel_power_domains_dump_info(i915); 5481 dumped = true; 5482 } 5483 } 5484 5485 mutex_unlock(&power_domains->lock); 5486 } 5487 5488 #else 5489 5490 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 5491 { 5492 } 5493 5494 #endif 5495 5496 void intel_display_power_suspend_late(struct drm_i915_private *i915) 5497 { 5498 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) 5499 bxt_enable_dc9(i915); 5500 else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 5501 hsw_enable_pc8(i915); 5502 } 5503 5504 void intel_display_power_resume_early(struct drm_i915_private *i915) 5505 { 5506 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) { 5507 gen9_sanitize_dc_state(i915); 5508 bxt_disable_dc9(i915); 5509 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 5510 hsw_disable_pc8(i915); 5511 } 5512 } 5513 5514 void intel_display_power_suspend(struct drm_i915_private *i915) 5515 { 5516 if (INTEL_GEN(i915) >= 11) { 5517 icl_display_core_uninit(i915); 5518 bxt_enable_dc9(i915); 5519 } else if (IS_GEN9_LP(i915)) { 5520 bxt_display_core_uninit(i915); 5521 bxt_enable_dc9(i915); 5522 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 5523 hsw_enable_pc8(i915); 5524 } 5525 } 5526 5527 void intel_display_power_resume(struct drm_i915_private *i915) 5528 { 5529 if (INTEL_GEN(i915) >= 11) { 5530 bxt_disable_dc9(i915); 5531 icl_display_core_init(i915, true); 5532 if (i915->csr.dmc_payload) { 5533 if (i915->csr.allowed_dc_mask & 5534 DC_STATE_EN_UPTO_DC6) 5535 skl_enable_dc6(i915); 5536 else if (i915->csr.allowed_dc_mask & 5537 DC_STATE_EN_UPTO_DC5) 5538 gen9_enable_dc5(i915); 5539 } 5540 } else if (IS_GEN9_LP(i915)) { 5541 bxt_disable_dc9(i915); 5542 bxt_display_core_init(i915, true); 5543 if (i915->csr.dmc_payload && 5544 (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 5545 gen9_enable_dc5(i915); 5546 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 5547 hsw_disable_pc8(i915); 5548 } 5549 } 5550