/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "display/intel_crt.h"
#include "display/intel_dp.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_csr.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_G_LANES:
		return "PORT_DDI_G_LANES";
	case POWER_DOMAIN_PORT_DDI_H_LANES:
		return "PORT_DDI_H_LANES";
	case POWER_DOMAIN_PORT_DDI_I_LANES:
		return "PORT_DDI_I_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_G_IO:
		return "PORT_DDI_G_IO";
	case POWER_DOMAIN_PORT_DDI_H_IO:
		return "PORT_DDI_H_IO";
	case POWER_DOMAIN_PORT_DDI_I_IO:
		return "PORT_DDI_I_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_G:
		return "AUX_G";
	case POWER_DOMAIN_AUX_H:
		return "AUX_H";
	case POWER_DOMAIN_AUX_I:
		return "AUX_I";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_C_TBT:
		return "AUX_C_TBT";
	case POWER_DOMAIN_AUX_D_TBT:
		return "AUX_D_TBT";
	case POWER_DOMAIN_AUX_E_TBT:
		return "AUX_E_TBT";
	case POWER_DOMAIN_AUX_F_TBT:
		return "AUX_F_TBT";
	case POWER_DOMAIN_AUX_G_TBT:
		return "AUX_G_TBT";
	case POWER_DOMAIN_AUX_H_TBT:
		return "AUX_H_TBT";
	case POWER_DOMAIN_AUX_I_TBT:
		return "AUX_I_TBT";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DPLL_DC_OFF:
		return "DPLL_DC_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}
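/*
 * Example (illustrative only): power wells are strictly reference counted,
 * so only the 0 <-> 1 count transitions above touch the hardware:
 *
 *	intel_power_well_get(i915, pw);		count 0 -> 1, well enabled
 *	intel_power_well_get(i915, pw);		count 1 -> 2, no HW access
 *	intel_power_well_put(i915, pw);		count 2 -> 1, no HW access
 *	intel_power_well_put(i915, pw);		count 1 -> 0, well disabled
 */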
/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		DRM_DEBUG_KMS("%s power well enable timeout\n",
			      power_well->desc->name);

		/* An AUX timeout is expected if the TBT DP tunnel is down. */
		WARN_ON(!power_well->desc->hsw.is_tc_tbt);
	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
	ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
	ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;

	return ret;
}
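/*
 * Example (illustrative only): a return value of 0xa from
 * hsw_power_well_requesters() means the driver (bit 1) and debug (bit 3)
 * request registers are both asking for the power well to stay enabled.
 */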
332 */ 333 wait_for((disabled = !(I915_READ(regs->driver) & 334 HSW_PWR_WELL_CTL_STATE(pw_idx))) || 335 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); 336 if (disabled) 337 return; 338 339 DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", 340 power_well->desc->name, 341 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); 342 } 343 344 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, 345 enum skl_power_gate pg) 346 { 347 /* Timeout 5us for PG#0, for other PGs 1us */ 348 WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS, 349 SKL_FUSE_PG_DIST_STATUS(pg), 1)); 350 } 351 352 static void hsw_power_well_enable(struct drm_i915_private *dev_priv, 353 struct i915_power_well *power_well) 354 { 355 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 356 int pw_idx = power_well->desc->hsw.idx; 357 bool wait_fuses = power_well->desc->hsw.has_fuses; 358 enum skl_power_gate uninitialized_var(pg); 359 u32 val; 360 361 if (wait_fuses) { 362 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : 363 SKL_PW_CTL_IDX_TO_PG(pw_idx); 364 /* 365 * For PW1 we have to wait both for the PW0/PG0 fuse state 366 * before enabling the power well and PW1/PG1's own fuse 367 * state after the enabling. For all other power wells with 368 * fuses we only have to wait for that PW/PG's fuse state 369 * after the enabling. 370 */ 371 if (pg == SKL_PG1) 372 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); 373 } 374 375 val = I915_READ(regs->driver); 376 I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); 377 hsw_wait_for_power_well_enable(dev_priv, power_well); 378 379 /* Display WA #1178: cnl */ 380 if (IS_CANNONLAKE(dev_priv) && 381 pw_idx >= GLK_PW_CTL_IDX_AUX_B && 382 pw_idx <= CNL_PW_CTL_IDX_AUX_F) { 383 val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx)); 384 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS; 385 I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val); 386 } 387 388 if (wait_fuses) 389 gen9_wait_for_power_well_fuses(dev_priv, pg); 390 391 hsw_power_well_post_enable(dev_priv, 392 power_well->desc->hsw.irq_pipe_mask, 393 power_well->desc->hsw.has_vga); 394 } 395 396 static void hsw_power_well_disable(struct drm_i915_private *dev_priv, 397 struct i915_power_well *power_well) 398 { 399 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 400 int pw_idx = power_well->desc->hsw.idx; 401 u32 val; 402 403 hsw_power_well_pre_disable(dev_priv, 404 power_well->desc->hsw.irq_pipe_mask); 405 406 val = I915_READ(regs->driver); 407 I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); 408 hsw_wait_for_power_well_disable(dev_priv, power_well); 409 } 410 411 #define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) 412 413 static void 414 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, 415 struct i915_power_well *power_well) 416 { 417 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 418 int pw_idx = power_well->desc->hsw.idx; 419 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); 420 u32 val; 421 int wa_idx_max; 422 423 val = I915_READ(regs->driver); 424 I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); 425 426 if (INTEL_GEN(dev_priv) < 12) { 427 val = I915_READ(ICL_PORT_CL_DW12(phy)); 428 I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX); 429 } 430 431 hsw_wait_for_power_well_enable(dev_priv, power_well); 432 433 /* Display WA #1178: icl, tgl */ 434 if (IS_TIGERLAKE(dev_priv)) 435 wa_idx_max = ICL_PW_CTL_IDX_AUX_C; 436 else 437 
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;
	int wa_idx_max;

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (INTEL_GEN(dev_priv) < 12) {
		val = I915_READ(ICL_PORT_CL_DW12(phy));
		I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: icl, tgl */
	if (IS_TIGERLAKE(dev_priv))
		wa_idx_max = ICL_PW_CTL_IDX_AUX_C;
	else
		wa_idx_max = ICL_PW_CTL_IDX_AUX_B;

	if (!IS_ELKHARTLAKE(dev_priv) &&
	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	if (INTEL_GEN(dev_priv) < 12) {
		val = I915_READ(ICL_PORT_CL_DW12(phy));
		I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
	}

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
						 ICL_AUX_PW_TO_CH(pw_idx);
}
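/*
 * Example (illustrative only): per the macros above, the first TBT AUX well
 * (ICL_PW_CTL_IDX_AUX_TBT1) maps to AUX_CH_C, while the legacy
 * ICL_PW_CTL_IDX_AUX_A well maps to AUX_CH_A.
 */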
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	WARN_ON(refs > power_well->count);

	return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		if (!intel_phy_is_tc(dev_priv, phy))
			continue;

		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(&encoder->base);
		if (WARN_ON(!dig_port))
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	if (WARN_ON(!dig_port))
		return;

	WARN_ON(!intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well);

	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (power_well->desc->hsw.is_tc_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);

	hsw_power_well_enable(dev_priv, power_well);

	if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
		I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			DRM_WARN("Timeout waiting for TC uC health\n");
	}
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	icl_tc_port_assert_ref_held(dev_priv, power_well);

	hsw_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will be not restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= I915_READ(regs->bios);

	return (val & mask) == mask;
}
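/*
 * Example (illustrative only): a well whose STATE bit is set but whose driver
 * REQ bit is clear (e.g. one left enabled by the BIOS) reads back as disabled
 * here; hsw_power_well_sync_hw() later takes over such BIOS request bits
 * explicitly.
 */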
653 */ 654 } 655 656 static void gen9_write_dc_state(struct drm_i915_private *dev_priv, 657 u32 state) 658 { 659 int rewrites = 0; 660 int rereads = 0; 661 u32 v; 662 663 I915_WRITE(DC_STATE_EN, state); 664 665 /* It has been observed that disabling the dc6 state sometimes 666 * doesn't stick and dmc keeps returning old value. Make sure 667 * the write really sticks enough times and also force rewrite until 668 * we are confident that state is exactly what we want. 669 */ 670 do { 671 v = I915_READ(DC_STATE_EN); 672 673 if (v != state) { 674 I915_WRITE(DC_STATE_EN, state); 675 rewrites++; 676 rereads = 0; 677 } else if (rereads++ > 5) { 678 break; 679 } 680 681 } while (rewrites < 100); 682 683 if (v != state) 684 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n", 685 state, v); 686 687 /* Most of the times we need one retry, avoid spam */ 688 if (rewrites > 1) 689 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n", 690 state, rewrites); 691 } 692 693 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) 694 { 695 u32 mask; 696 697 mask = DC_STATE_EN_UPTO_DC5; 698 699 if (INTEL_GEN(dev_priv) >= 12) 700 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 701 | DC_STATE_EN_DC9; 702 else if (IS_GEN(dev_priv, 11)) 703 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; 704 else if (IS_GEN9_LP(dev_priv)) 705 mask |= DC_STATE_EN_DC9; 706 else 707 mask |= DC_STATE_EN_UPTO_DC6; 708 709 return mask; 710 } 711 712 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) 713 { 714 u32 val; 715 716 val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv); 717 718 DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n", 719 dev_priv->csr.dc_state, val); 720 dev_priv->csr.dc_state = val; 721 } 722 723 /** 724 * gen9_set_dc_state - set target display C power state 725 * @dev_priv: i915 device instance 726 * @state: target DC power state 727 * - DC_STATE_DISABLE 728 * - DC_STATE_EN_UPTO_DC5 729 * - DC_STATE_EN_UPTO_DC6 730 * - DC_STATE_EN_DC9 731 * 732 * Signal to DMC firmware/HW the target DC power state passed in @state. 733 * DMC/HW can turn off individual display clocks and power rails when entering 734 * a deeper DC power state (higher in number) and turns these back when exiting 735 * that state to a shallower power state (lower in number). The HW will decide 736 * when to actually enter a given state on an on-demand basis, for instance 737 * depending on the active state of display pipes. The state of display 738 * registers backed by affected power rails are saved/restored as needed. 739 * 740 * Based on the above enabling a deeper DC power state is asynchronous wrt. 741 * enabling it. Disabling a deeper power state is synchronous: for instance 742 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned 743 * back on and register state is restored. This is guaranteed by the MMIO write 744 * to DC_STATE_EN blocking until the state is restored. 
745 */ 746 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) 747 { 748 u32 val; 749 u32 mask; 750 751 if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask)) 752 state &= dev_priv->csr.allowed_dc_mask; 753 754 val = I915_READ(DC_STATE_EN); 755 mask = gen9_dc_mask(dev_priv); 756 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", 757 val & mask, state); 758 759 /* Check if DMC is ignoring our DC state requests */ 760 if ((val & mask) != dev_priv->csr.dc_state) 761 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n", 762 dev_priv->csr.dc_state, val & mask); 763 764 val &= ~mask; 765 val |= state; 766 767 gen9_write_dc_state(dev_priv, val); 768 769 dev_priv->csr.dc_state = val & mask; 770 } 771 772 static u32 773 sanitize_target_dc_state(struct drm_i915_private *dev_priv, 774 u32 target_dc_state) 775 { 776 u32 states[] = { 777 DC_STATE_EN_UPTO_DC6, 778 DC_STATE_EN_UPTO_DC5, 779 DC_STATE_EN_DC3CO, 780 DC_STATE_DISABLE, 781 }; 782 int i; 783 784 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { 785 if (target_dc_state != states[i]) 786 continue; 787 788 if (dev_priv->csr.allowed_dc_mask & target_dc_state) 789 break; 790 791 target_dc_state = states[i + 1]; 792 } 793 794 return target_dc_state; 795 } 796 797 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) 798 { 799 DRM_DEBUG_KMS("Enabling DC3CO\n"); 800 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); 801 } 802 803 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) 804 { 805 u32 val; 806 807 DRM_DEBUG_KMS("Disabling DC3CO\n"); 808 val = I915_READ(DC_STATE_EN); 809 val &= ~DC_STATE_DC3CO_STATUS; 810 I915_WRITE(DC_STATE_EN, val); 811 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 812 /* 813 * Delay of 200us DC3CO Exit time B.Spec 49196 814 */ 815 usleep_range(200, 210); 816 } 817 818 static void bxt_enable_dc9(struct drm_i915_private *dev_priv) 819 { 820 assert_can_enable_dc9(dev_priv); 821 822 DRM_DEBUG_KMS("Enabling DC9\n"); 823 /* 824 * Power sequencer reset is not needed on 825 * platforms with South Display Engine on PCH, 826 * because PPS registers are always on. 827 */ 828 if (!HAS_PCH_SPLIT(dev_priv)) 829 intel_power_sequencer_reset(dev_priv); 830 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); 831 } 832 833 static void bxt_disable_dc9(struct drm_i915_private *dev_priv) 834 { 835 assert_can_disable_dc9(dev_priv); 836 837 DRM_DEBUG_KMS("Disabling DC9\n"); 838 839 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 840 841 intel_pps_unlock_regs_wa(dev_priv); 842 } 843 844 static void assert_csr_loaded(struct drm_i915_private *dev_priv) 845 { 846 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)), 847 "CSR program storage start is NULL\n"); 848 WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n"); 849 WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n"); 850 } 851 852 static struct i915_power_well * 853 lookup_power_well(struct drm_i915_private *dev_priv, 854 enum i915_power_well_id power_well_id) 855 { 856 struct i915_power_well *power_well; 857 858 for_each_power_well(dev_priv, power_well) 859 if (power_well->desc->id == power_well_id) 860 return power_well; 861 862 /* 863 * It's not feasible to add error checking code to the callers since 864 * this condition really shouldn't happen and it doesn't even make sense 865 * to abort things like display initialization sequences. Just return 866 * the first power well and hope the WARN gets reported so we can fix 867 * our driver. 
868 */ 869 WARN(1, "Power well %d not defined for this platform\n", power_well_id); 870 return &dev_priv->power_domains.power_wells[0]; 871 } 872 873 /** 874 * intel_display_power_set_target_dc_state - Set target dc state. 875 * @dev_priv: i915 device 876 * @state: state which needs to be set as target_dc_state. 877 * 878 * This function set the "DC off" power well target_dc_state, 879 * based upon this target_dc_stste, "DC off" power well will 880 * enable desired DC state. 881 */ 882 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, 883 u32 state) 884 { 885 struct i915_power_well *power_well; 886 bool dc_off_enabled; 887 struct i915_power_domains *power_domains = &dev_priv->power_domains; 888 889 mutex_lock(&power_domains->lock); 890 power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF); 891 892 if (WARN_ON(!power_well)) 893 goto unlock; 894 895 state = sanitize_target_dc_state(dev_priv, state); 896 897 if (state == dev_priv->csr.target_dc_state) 898 goto unlock; 899 900 dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv, 901 power_well); 902 /* 903 * If DC off power well is disabled, need to enable and disable the 904 * DC off power well to effect target DC state. 905 */ 906 if (!dc_off_enabled) 907 power_well->desc->ops->enable(dev_priv, power_well); 908 909 dev_priv->csr.target_dc_state = state; 910 911 if (!dc_off_enabled) 912 power_well->desc->ops->disable(dev_priv, power_well); 913 914 unlock: 915 mutex_unlock(&power_domains->lock); 916 } 917 918 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) 919 { 920 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, 921 SKL_DISP_PW_2); 922 923 WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n"); 924 925 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), 926 "DC5 already programmed to be enabled.\n"); 927 assert_rpm_wakelock_held(&dev_priv->runtime_pm); 928 929 assert_csr_loaded(dev_priv); 930 } 931 932 static void gen9_enable_dc5(struct drm_i915_private *dev_priv) 933 { 934 assert_can_enable_dc5(dev_priv); 935 936 DRM_DEBUG_KMS("Enabling DC5\n"); 937 938 /* Wa Display #1183: skl,kbl,cfl */ 939 if (IS_GEN9_BC(dev_priv)) 940 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | 941 SKL_SELECT_ALTERNATE_DC_EXIT); 942 943 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); 944 } 945 946 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) 947 { 948 WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 949 "Backlight is not disabled.\n"); 950 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), 951 "DC6 already programmed to be enabled.\n"); 952 953 assert_csr_loaded(dev_priv); 954 } 955 956 static void skl_enable_dc6(struct drm_i915_private *dev_priv) 957 { 958 assert_can_enable_dc6(dev_priv); 959 960 DRM_DEBUG_KMS("Enabling DC6\n"); 961 962 /* Wa Display #1183: skl,kbl,cfl */ 963 if (IS_GEN9_BC(dev_priv)) 964 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | 965 SKL_SELECT_ALTERNATE_DC_EXIT); 966 967 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 968 } 969 970 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, 971 struct i915_power_well *power_well) 972 { 973 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 974 int pw_idx = power_well->desc->hsw.idx; 975 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); 976 u32 bios_req = I915_READ(regs->bios); 977 978 /* Take over the request bit if set by BIOS. 
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_csr_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = I915_READ(regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = I915_READ(regs->driver);

		if (!(drv_req & mask))
			I915_WRITE(regs->driver, drv_req | mask);
		I915_WRITE(regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}
1068 */ 1069 intel_combo_phy_init(dev_priv); 1070 } 1071 1072 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, 1073 struct i915_power_well *power_well) 1074 { 1075 gen9_disable_dc_states(dev_priv); 1076 } 1077 1078 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, 1079 struct i915_power_well *power_well) 1080 { 1081 if (!dev_priv->csr.dmc_payload) 1082 return; 1083 1084 switch (dev_priv->csr.target_dc_state) { 1085 case DC_STATE_EN_DC3CO: 1086 tgl_enable_dc3co(dev_priv); 1087 break; 1088 case DC_STATE_EN_UPTO_DC6: 1089 skl_enable_dc6(dev_priv); 1090 break; 1091 case DC_STATE_EN_UPTO_DC5: 1092 gen9_enable_dc5(dev_priv); 1093 break; 1094 } 1095 } 1096 1097 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, 1098 struct i915_power_well *power_well) 1099 { 1100 } 1101 1102 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, 1103 struct i915_power_well *power_well) 1104 { 1105 } 1106 1107 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, 1108 struct i915_power_well *power_well) 1109 { 1110 return true; 1111 } 1112 1113 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, 1114 struct i915_power_well *power_well) 1115 { 1116 if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) 1117 i830_enable_pipe(dev_priv, PIPE_A); 1118 if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) 1119 i830_enable_pipe(dev_priv, PIPE_B); 1120 } 1121 1122 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, 1123 struct i915_power_well *power_well) 1124 { 1125 i830_disable_pipe(dev_priv, PIPE_B); 1126 i830_disable_pipe(dev_priv, PIPE_A); 1127 } 1128 1129 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, 1130 struct i915_power_well *power_well) 1131 { 1132 return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE && 1133 I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE; 1134 } 1135 1136 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, 1137 struct i915_power_well *power_well) 1138 { 1139 if (power_well->count > 0) 1140 i830_pipes_power_well_enable(dev_priv, power_well); 1141 else 1142 i830_pipes_power_well_disable(dev_priv, power_well); 1143 } 1144 1145 static void vlv_set_power_well(struct drm_i915_private *dev_priv, 1146 struct i915_power_well *power_well, bool enable) 1147 { 1148 int pw_idx = power_well->desc->vlv.idx; 1149 u32 mask; 1150 u32 state; 1151 u32 ctrl; 1152 1153 mask = PUNIT_PWRGT_MASK(pw_idx); 1154 state = enable ? 
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
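/*
 * Example (illustrative only, assuming rawclk_freq is tracked in kHz): the
 * DIV_ROUND_CLOSEST(..., 1000) above would program a 200 MHz rawclk
 * (200000 kHz) as 200 in RAWCLK_FREQ_VLV.
 */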
1268 */ 1269 for_each_pipe(dev_priv, pipe) { 1270 u32 val = I915_READ(DPLL(pipe)); 1271 1272 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1273 if (pipe != PIPE_A) 1274 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1275 1276 I915_WRITE(DPLL(pipe), val); 1277 } 1278 1279 vlv_init_display_clock_gating(dev_priv); 1280 1281 spin_lock_irq(&dev_priv->irq_lock); 1282 valleyview_enable_display_irqs(dev_priv); 1283 spin_unlock_irq(&dev_priv->irq_lock); 1284 1285 /* 1286 * During driver initialization/resume we can avoid restoring the 1287 * part of the HW/SW state that will be inited anyway explicitly. 1288 */ 1289 if (dev_priv->power_domains.initializing) 1290 return; 1291 1292 intel_hpd_init(dev_priv); 1293 1294 /* Re-enable the ADPA, if we have one */ 1295 for_each_intel_encoder(&dev_priv->drm, encoder) { 1296 if (encoder->type == INTEL_OUTPUT_ANALOG) 1297 intel_crt_reset(&encoder->base); 1298 } 1299 1300 intel_vga_redisable_power_on(dev_priv); 1301 1302 intel_pps_unlock_regs_wa(dev_priv); 1303 } 1304 1305 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) 1306 { 1307 spin_lock_irq(&dev_priv->irq_lock); 1308 valleyview_disable_display_irqs(dev_priv); 1309 spin_unlock_irq(&dev_priv->irq_lock); 1310 1311 /* make sure we're done processing display irqs */ 1312 intel_synchronize_irq(dev_priv); 1313 1314 intel_power_sequencer_reset(dev_priv); 1315 1316 /* Prevent us from re-enabling polling on accident in late suspend */ 1317 if (!dev_priv->drm.dev->power.is_suspended) 1318 intel_hpd_poll_init(dev_priv); 1319 } 1320 1321 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, 1322 struct i915_power_well *power_well) 1323 { 1324 vlv_set_power_well(dev_priv, power_well, true); 1325 1326 vlv_display_power_well_init(dev_priv); 1327 } 1328 1329 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, 1330 struct i915_power_well *power_well) 1331 { 1332 vlv_display_power_well_deinit(dev_priv); 1333 1334 vlv_set_power_well(dev_priv, power_well, false); 1335 } 1336 1337 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1338 struct i915_power_well *power_well) 1339 { 1340 /* since ref/cri clock was enabled */ 1341 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1342 1343 vlv_set_power_well(dev_priv, power_well, true); 1344 1345 /* 1346 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - 1347 * 6. De-assert cmn_reset/side_reset. Same as VLV X0. 1348 * a. GUnit 0x2110 bit[0] set to 1 (def 0) 1349 * b. The other bits such as sfr settings / modesel may all 1350 * be set to 0. 1351 * 1352 * This should only be done on init and resume from S3 with 1353 * both PLLs disabled, or we risk losing DPIO and PLL 1354 * synchronization. 
1355 */ 1356 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); 1357 } 1358 1359 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1360 struct i915_power_well *power_well) 1361 { 1362 enum pipe pipe; 1363 1364 for_each_pipe(dev_priv, pipe) 1365 assert_pll_disabled(dev_priv, pipe); 1366 1367 /* Assert common reset */ 1368 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST); 1369 1370 vlv_set_power_well(dev_priv, power_well, false); 1371 } 1372 1373 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) 1374 1375 #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) 1376 1377 static void assert_chv_phy_status(struct drm_i915_private *dev_priv) 1378 { 1379 struct i915_power_well *cmn_bc = 1380 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 1381 struct i915_power_well *cmn_d = 1382 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 1383 u32 phy_control = dev_priv->chv_phy_control; 1384 u32 phy_status = 0; 1385 u32 phy_status_mask = 0xffffffff; 1386 1387 /* 1388 * The BIOS can leave the PHY is some weird state 1389 * where it doesn't fully power down some parts. 1390 * Disable the asserts until the PHY has been fully 1391 * reset (ie. the power well has been disabled at 1392 * least once). 1393 */ 1394 if (!dev_priv->chv_phy_assert[DPIO_PHY0]) 1395 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | 1396 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | 1397 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | 1398 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | 1399 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | 1400 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); 1401 1402 if (!dev_priv->chv_phy_assert[DPIO_PHY1]) 1403 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | 1404 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | 1405 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); 1406 1407 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { 1408 phy_status |= PHY_POWERGOOD(DPIO_PHY0); 1409 1410 /* this assumes override is only used to enable lanes */ 1411 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) 1412 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); 1413 1414 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) 1415 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); 1416 1417 /* CL1 is on whenever anything is on in either channel */ 1418 if (BITS_SET(phy_control, 1419 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | 1420 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) 1421 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); 1422 1423 /* 1424 * The DPLLB check accounts for the pipe B + port A usage 1425 * with CL2 powered up but all the lanes in the second channel 1426 * powered down. 
1427 */ 1428 if (BITS_SET(phy_control, 1429 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && 1430 (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) 1431 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); 1432 1433 if (BITS_SET(phy_control, 1434 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) 1435 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); 1436 if (BITS_SET(phy_control, 1437 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) 1438 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); 1439 1440 if (BITS_SET(phy_control, 1441 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) 1442 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); 1443 if (BITS_SET(phy_control, 1444 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) 1445 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); 1446 } 1447 1448 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { 1449 phy_status |= PHY_POWERGOOD(DPIO_PHY1); 1450 1451 /* this assumes override is only used to enable lanes */ 1452 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) 1453 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); 1454 1455 if (BITS_SET(phy_control, 1456 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) 1457 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); 1458 1459 if (BITS_SET(phy_control, 1460 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) 1461 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); 1462 if (BITS_SET(phy_control, 1463 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) 1464 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); 1465 } 1466 1467 phy_status &= phy_status_mask; 1468 1469 /* 1470 * The PHY may be busy with some initial calibration and whatnot, 1471 * so the power state can take a while to actually change. 
1472 */ 1473 if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS, 1474 phy_status_mask, phy_status, 10)) 1475 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", 1476 I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask, 1477 phy_status, dev_priv->chv_phy_control); 1478 } 1479 1480 #undef BITS_SET 1481 1482 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1483 struct i915_power_well *power_well) 1484 { 1485 enum dpio_phy phy; 1486 enum pipe pipe; 1487 u32 tmp; 1488 1489 WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && 1490 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); 1491 1492 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1493 pipe = PIPE_A; 1494 phy = DPIO_PHY0; 1495 } else { 1496 pipe = PIPE_C; 1497 phy = DPIO_PHY1; 1498 } 1499 1500 /* since ref/cri clock was enabled */ 1501 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1502 vlv_set_power_well(dev_priv, power_well, true); 1503 1504 /* Poll for phypwrgood signal */ 1505 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, 1506 PHY_POWERGOOD(phy), 1)) 1507 DRM_ERROR("Display PHY %d is not power up\n", phy); 1508 1509 vlv_dpio_get(dev_priv); 1510 1511 /* Enable dynamic power down */ 1512 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); 1513 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | 1514 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; 1515 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); 1516 1517 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1518 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); 1519 tmp |= DPIO_DYNPWRDOWNEN_CH1; 1520 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); 1521 } else { 1522 /* 1523 * Force the non-existing CL2 off. BXT does this 1524 * too, so maybe it saves some power even though 1525 * CL2 doesn't exist? 
1526 */ 1527 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); 1528 tmp |= DPIO_CL2_LDOFUSE_PWRENB; 1529 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); 1530 } 1531 1532 vlv_dpio_put(dev_priv); 1533 1534 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); 1535 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); 1536 1537 DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1538 phy, dev_priv->chv_phy_control); 1539 1540 assert_chv_phy_status(dev_priv); 1541 } 1542 1543 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1544 struct i915_power_well *power_well) 1545 { 1546 enum dpio_phy phy; 1547 1548 WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && 1549 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); 1550 1551 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1552 phy = DPIO_PHY0; 1553 assert_pll_disabled(dev_priv, PIPE_A); 1554 assert_pll_disabled(dev_priv, PIPE_B); 1555 } else { 1556 phy = DPIO_PHY1; 1557 assert_pll_disabled(dev_priv, PIPE_C); 1558 } 1559 1560 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); 1561 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); 1562 1563 vlv_set_power_well(dev_priv, power_well, false); 1564 1565 DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1566 phy, dev_priv->chv_phy_control); 1567 1568 /* PHY is fully reset now, so we can enable the PHY state asserts */ 1569 dev_priv->chv_phy_assert[phy] = true; 1570 1571 assert_chv_phy_status(dev_priv); 1572 } 1573 1574 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1575 enum dpio_channel ch, bool override, unsigned int mask) 1576 { 1577 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; 1578 u32 reg, val, expected, actual; 1579 1580 /* 1581 * The BIOS can leave the PHY is some weird state 1582 * where it doesn't fully power down some parts. 1583 * Disable the asserts until the PHY has been fully 1584 * reset (ie. the power well has been disabled at 1585 * least once). 1586 */ 1587 if (!dev_priv->chv_phy_assert[phy]) 1588 return; 1589 1590 if (ch == DPIO_CH0) 1591 reg = _CHV_CMN_DW0_CH0; 1592 else 1593 reg = _CHV_CMN_DW6_CH1; 1594 1595 vlv_dpio_get(dev_priv); 1596 val = vlv_dpio_read(dev_priv, pipe, reg); 1597 vlv_dpio_put(dev_priv); 1598 1599 /* 1600 * This assumes !override is only used when the port is disabled. 1601 * All lanes should power down even without the override when 1602 * the port is disabled. 1603 */ 1604 if (!override || mask == 0xf) { 1605 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1606 /* 1607 * If CH1 common lane is not active anymore 1608 * (eg. for pipe B DPLL) the entire channel will 1609 * shut down, which causes the common lane registers 1610 * to read as 0. That means we can't actually check 1611 * the lane power down status bits, but as the entire 1612 * register reads as 0 it's a good indication that the 1613 * channel is indeed entirely powered down. 1614 */ 1615 if (ch == DPIO_CH1 && val == 0) 1616 expected = 0; 1617 } else if (mask != 0x0) { 1618 expected = DPIO_ANYDL_POWERDOWN; 1619 } else { 1620 expected = 0; 1621 } 1622 1623 if (ch == DPIO_CH0) 1624 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; 1625 else 1626 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; 1627 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1628 1629 WARN(actual != expected, 1630 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. 
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
{
	return power_domains->async_put_domains[0] |
	       power_domains->async_put_domains[1];
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	return !WARN_ON(power_domains->async_put_domains[0] &
			power_domains->async_put_domains[1]);
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	err |= WARN_ON(!!power_domains->async_put_wakeref !=
		       !!__async_put_domains_mask(power_domains));

	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);

	return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, u64 mask)
{
	enum intel_display_power_domain domain;

	DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
	for_each_power_domain(domain, mask)
		DRM_DEBUG_DRIVER("%s use_count %d\n",
				 intel_display_power_domain_str(domain),
				 power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
			 power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	return __async_put_domains_mask(power_domains);
}

static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
}

static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool ret = false;

	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	if (async_put_domains_mask(power_domains))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}
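/*
 * A minimal usage sketch (illustrative only; the domain picked here is
 * arbitrary):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);
 *	... access the hardware behind the domain ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A, wakeref);
 */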
/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the power
 * domain is already enabled, and keeps it enabled for as long as the
 * reference is held. Unlike intel_display_power_get() it never powers the
 * domain up itself.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns:
 * A wakeref on success, 0 if the power domain was not enabled.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return 0;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}
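/*
 * Illustrative pattern (not a prescribed API contract): hardware state
 * readout is the typical user of the conditional variant, so that a
 * powered-down block is simply reported as disabled instead of being
 * powered up just to be read:
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv,
 *						     POWER_DOMAIN_PIPE_A);
 *	if (!wakeref)
 *		return false;
 *	... read out the pipe registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */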
static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);

	power_domains = &dev_priv->power_domains;

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     name);
	WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
	     "Async disabling of domain %s is pending\n",
	     name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_display_power_put() instead.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}

static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	WARN_ON(power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	WARN_ON(!queue_delayed_work(system_unbound_wq,
				    &power_domains->async_put_work,
				    msecs_to_jiffies(100)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must already hold a raw wakeref; upgrade that to a proper
	 * wakeref to make the state checker happy about the HW access during
	 * power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}
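/*
 * The work handler below juggles two raw wakerefs: the one stashed by
 * queue_async_put_domains_work() when the work was queued
 * (old_work_wakeref), and a fresh one taken up front (new_work_wakeref)
 * that is either handed to a requeued work or dropped again on the way
 * out.
 */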
static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (power_domains->async_put_domains[1]) {
		power_domains->async_put_domains[0] =
			fetch_and_zero(&power_domains->async_put_domains[1]);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * __intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	WARN_ON(power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		power_domains->async_put_domains[1] |= BIT_ULL(domain);
	} else {
		power_domains->async_put_domains[0] |= BIT_ULL(domain);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}
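/*
 * Illustrative only: a caller on a frequently taken path can avoid an
 * immediate power-down / power-up cycle by dropping its reference
 * asynchronously; the domain then stays powered for the ~100ms grace
 * period used above, unless someone grabs it again in the meantime:
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_B);
 *	... use the hardware ...
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_B, wakeref);
 */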
/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  async_put_domains_mask(power_domains));
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensures that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	WARN_ON(power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#endif
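/*
 * The per-platform tables below describe each power well as a u64 bitmask
 * of the domains it powers; a domain may appear in several wells, in which
 * case all of them get enabled when the domain is grabbed.
 * POWER_DOMAIN_INIT appears in nearly every mask so that those wells stay
 * enabled while the driver holds its initialization-time reference.
 */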
#define I830_PIPES_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define HSW_DISPLAY_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_AUX_F) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_F) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * ICL PW_0/PG_0 domains (HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - central power except FBC
 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
 * ICL PW_1/PG_1 domains (HW/DMC control):
 * - DBUF function
 * - PIPE_A and its planes, except VGA
 * - transcoder EDP + PSR
 * - transcoder DSI
 * - DDI_A
 * - FBC
 */
#define ICL_PW_4_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_INIT))
	/* VDSC/joining */
#define ICL_PW_3_POWER_DOMAINS ( \
	ICL_PW_4_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_AUX_E) | \
	BIT_ULL(POWER_DOMAIN_AUX_F) | \
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * - transcoder WD
 * - KVMR (HW control)
 */
#define ICL_PW_2_POWER_DOMAINS ( \
	ICL_PW_3_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * - KVMR (HW control)
 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	ICL_PW_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define ICL_DDI_IO_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

#define ICL_AUX_A_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
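/*
 * Note the nesting of the masks above and below: a domain listed in
 * ICL_PW_4 is also listed in ICL_PW_3, ICL_PW_2 and ICL_DISPLAY_DC_OFF
 * (and likewise TGL_PW_5 through TGL_DISPLAY_DC_OFF). Grabbing a domain
 * served by an inner well therefore also enables every outer well whose
 * mask includes the same domain.
 */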
#define TGL_PW_5_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_D) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_4_POWER_DOMAINS ( \
	TGL_PW_5_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_3_POWER_DOMAINS ( \
	TGL_PW_4_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_AUX_E) | \
	BIT_ULL(POWER_DOMAIN_AUX_F) | \
	BIT_ULL(POWER_DOMAIN_AUX_G) | \
	BIT_ULL(POWER_DOMAIN_AUX_H) | \
	BIT_ULL(POWER_DOMAIN_AUX_I) | \
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_2_POWER_DOMAINS ( \
	TGL_PW_3_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	TGL_PW_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))

#define TGL_AUX_A_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define TGL_AUX_B_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define TGL_AUX_C_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_G))
#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_H))
#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_I))
#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
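/*
 * Each power well variant below fills in the same four-entry vtable:
 * sync_hw reconciles driver bookkeeping with whatever state the
 * BIOS/firmware left the well in, enable/disable flip the well, and
 * is_enabled reads back the hardware state. Wells that need no hardware
 * programming for a given step use the noop callbacks.
 */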
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};

static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios = HSW_PWR_WELL_CTL1,
	.driver = HSW_PWR_WELL_CTL2,
	.kvmr = HSW_PWR_WELL_CTL3,
	.debug = HSW_PWR_WELL_CTL4,
};

static const struct i915_power_well_desc hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};
static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);

	return ret;
}
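/*
 * Illustrative only: wells that need to be checked directly are looked up
 * by their stable ID rather than by domain, e.g.
 *
 *	if (intel_display_power_well_is_enabled(dev_priv, SKL_DISP_PW_2))
 *		... power well 2 is up, state behind it can be read out ...
 */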
static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
};

static const struct i915_power_well_desc bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};
static const struct i915_power_well_desc glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DISP_PW_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
};
static const struct i915_power_well_desc cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
		},
	},
};

static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_combo_phy_aux_power_well_enable,
	.disable = icl_combo_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_tc_phy_aux_power_well_enable,
	.disable = icl_tc_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_AUX1,
	.driver = ICL_PWR_WELL_CTL_AUX2,
	.debug = ICL_PWR_WELL_CTL_AUX4,
};

static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_DDI1,
	.driver = ICL_PWR_WELL_CTL_DDI2,
	.debug = ICL_PWR_WELL_CTL_DDI4,
};
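/*
 * In the ICL table below the combo-PHY AUX wells (ports A/B) and the
 * Type-C AUX wells use the two distinct enable/disable hook sets defined
 * above, while .hsw.is_tc_tbt further distinguishes the Thunderbolt
 * alt-mode AUX wells from the TC-legacy ones.
 */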
static const struct i915_power_well_desc icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
		},
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C TC1",
		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX D TC2",
		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E TC3",
		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F TC4",
		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX C TBT1",
		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX D TBT2",
		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX E TBT3",
		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
= true, 3664 }, 3665 }, 3666 { 3667 .name = "AUX F TBT4", 3668 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, 3669 .ops = &icl_tc_phy_aux_power_well_ops, 3670 .id = DISP_PW_ID_NONE, 3671 { 3672 .hsw.regs = &icl_aux_power_well_regs, 3673 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, 3674 .hsw.is_tc_tbt = true, 3675 }, 3676 }, 3677 { 3678 .name = "power well 4", 3679 .domains = ICL_PW_4_POWER_DOMAINS, 3680 .ops = &hsw_power_well_ops, 3681 .id = DISP_PW_ID_NONE, 3682 { 3683 .hsw.regs = &hsw_power_well_regs, 3684 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 3685 .hsw.has_fuses = true, 3686 .hsw.irq_pipe_mask = BIT(PIPE_C), 3687 }, 3688 }, 3689 }; 3690 3691 static const struct i915_power_well_desc ehl_power_wells[] = { 3692 { 3693 .name = "always-on", 3694 .always_on = true, 3695 .domains = POWER_DOMAIN_MASK, 3696 .ops = &i9xx_always_on_power_well_ops, 3697 .id = DISP_PW_ID_NONE, 3698 }, 3699 { 3700 .name = "power well 1", 3701 /* Handled by the DMC firmware */ 3702 .always_on = true, 3703 .domains = 0, 3704 .ops = &hsw_power_well_ops, 3705 .id = SKL_DISP_PW_1, 3706 { 3707 .hsw.regs = &hsw_power_well_regs, 3708 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3709 .hsw.has_fuses = true, 3710 }, 3711 }, 3712 { 3713 .name = "DC off", 3714 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, 3715 .ops = &gen9_dc_off_power_well_ops, 3716 .id = SKL_DISP_DC_OFF, 3717 }, 3718 { 3719 .name = "power well 2", 3720 .domains = ICL_PW_2_POWER_DOMAINS, 3721 .ops = &hsw_power_well_ops, 3722 .id = SKL_DISP_PW_2, 3723 { 3724 .hsw.regs = &hsw_power_well_regs, 3725 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3726 .hsw.has_fuses = true, 3727 }, 3728 }, 3729 { 3730 .name = "power well 3", 3731 .domains = ICL_PW_3_POWER_DOMAINS, 3732 .ops = &hsw_power_well_ops, 3733 .id = DISP_PW_ID_NONE, 3734 { 3735 .hsw.regs = &hsw_power_well_regs, 3736 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3737 .hsw.irq_pipe_mask = BIT(PIPE_B), 3738 .hsw.has_vga = true, 3739 .hsw.has_fuses = true, 3740 }, 3741 }, 3742 { 3743 .name = "DDI A IO", 3744 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3745 .ops = &hsw_power_well_ops, 3746 .id = DISP_PW_ID_NONE, 3747 { 3748 .hsw.regs = &icl_ddi_power_well_regs, 3749 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3750 }, 3751 }, 3752 { 3753 .name = "DDI B IO", 3754 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3755 .ops = &hsw_power_well_ops, 3756 .id = DISP_PW_ID_NONE, 3757 { 3758 .hsw.regs = &icl_ddi_power_well_regs, 3759 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3760 }, 3761 }, 3762 { 3763 .name = "DDI C IO", 3764 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3765 .ops = &hsw_power_well_ops, 3766 .id = DISP_PW_ID_NONE, 3767 { 3768 .hsw.regs = &icl_ddi_power_well_regs, 3769 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3770 }, 3771 }, 3772 { 3773 .name = "DDI D IO", 3774 .domains = ICL_DDI_IO_D_POWER_DOMAINS, 3775 .ops = &hsw_power_well_ops, 3776 .id = DISP_PW_ID_NONE, 3777 { 3778 .hsw.regs = &icl_ddi_power_well_regs, 3779 .hsw.idx = ICL_PW_CTL_IDX_DDI_D, 3780 }, 3781 }, 3782 { 3783 .name = "AUX A", 3784 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 3785 .ops = &hsw_power_well_ops, 3786 .id = DISP_PW_ID_NONE, 3787 { 3788 .hsw.regs = &icl_aux_power_well_regs, 3789 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3790 }, 3791 }, 3792 { 3793 .name = "AUX B", 3794 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 3795 .ops = &hsw_power_well_ops, 3796 .id = DISP_PW_ID_NONE, 3797 { 3798 .hsw.regs = &icl_aux_power_well_regs, 3799 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3800 }, 3801 }, 3802 { 3803 .name = "AUX C", 3804 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, 3805 .ops = &hsw_power_well_ops, 3806 .id = DISP_PW_ID_NONE, 3807 { 3808 .hsw.regs = &icl_aux_power_well_regs, 3809 
.hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3810 }, 3811 }, 3812 { 3813 .name = "AUX D", 3814 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, 3815 .ops = &hsw_power_well_ops, 3816 .id = DISP_PW_ID_NONE, 3817 { 3818 .hsw.regs = &icl_aux_power_well_regs, 3819 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 3820 }, 3821 }, 3822 { 3823 .name = "power well 4", 3824 .domains = ICL_PW_4_POWER_DOMAINS, 3825 .ops = &hsw_power_well_ops, 3826 .id = DISP_PW_ID_NONE, 3827 { 3828 .hsw.regs = &hsw_power_well_regs, 3829 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 3830 .hsw.has_fuses = true, 3831 .hsw.irq_pipe_mask = BIT(PIPE_C), 3832 }, 3833 }, 3834 }; 3835 3836 static const struct i915_power_well_desc tgl_power_wells[] = { 3837 { 3838 .name = "always-on", 3839 .always_on = true, 3840 .domains = POWER_DOMAIN_MASK, 3841 .ops = &i9xx_always_on_power_well_ops, 3842 .id = DISP_PW_ID_NONE, 3843 }, 3844 { 3845 .name = "power well 1", 3846 /* Handled by the DMC firmware */ 3847 .always_on = true, 3848 .domains = 0, 3849 .ops = &hsw_power_well_ops, 3850 .id = SKL_DISP_PW_1, 3851 { 3852 .hsw.regs = &hsw_power_well_regs, 3853 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3854 .hsw.has_fuses = true, 3855 }, 3856 }, 3857 { 3858 .name = "DC off", 3859 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, 3860 .ops = &gen9_dc_off_power_well_ops, 3861 .id = SKL_DISP_DC_OFF, 3862 }, 3863 { 3864 .name = "power well 2", 3865 .domains = TGL_PW_2_POWER_DOMAINS, 3866 .ops = &hsw_power_well_ops, 3867 .id = SKL_DISP_PW_2, 3868 { 3869 .hsw.regs = &hsw_power_well_regs, 3870 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3871 .hsw.has_fuses = true, 3872 }, 3873 }, 3874 { 3875 .name = "power well 3", 3876 .domains = TGL_PW_3_POWER_DOMAINS, 3877 .ops = &hsw_power_well_ops, 3878 .id = DISP_PW_ID_NONE, 3879 { 3880 .hsw.regs = &hsw_power_well_regs, 3881 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3882 .hsw.irq_pipe_mask = BIT(PIPE_B), 3883 .hsw.has_vga = true, 3884 .hsw.has_fuses = true, 3885 }, 3886 }, 3887 { 3888 .name = "DDI A IO", 3889 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3890 .ops = &hsw_power_well_ops, 3891 .id = DISP_PW_ID_NONE, 3892 { 3893 .hsw.regs = &icl_ddi_power_well_regs, 3894 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3895 } 3896 }, 3897 { 3898 .name = "DDI B IO", 3899 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3900 .ops = &hsw_power_well_ops, 3901 .id = DISP_PW_ID_NONE, 3902 { 3903 .hsw.regs = &icl_ddi_power_well_regs, 3904 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3905 } 3906 }, 3907 { 3908 .name = "DDI C IO", 3909 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3910 .ops = &hsw_power_well_ops, 3911 .id = DISP_PW_ID_NONE, 3912 { 3913 .hsw.regs = &icl_ddi_power_well_regs, 3914 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3915 } 3916 }, 3917 { 3918 .name = "DDI D TC1 IO", 3919 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS, 3920 .ops = &hsw_power_well_ops, 3921 .id = DISP_PW_ID_NONE, 3922 { 3923 .hsw.regs = &icl_ddi_power_well_regs, 3924 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 3925 }, 3926 }, 3927 { 3928 .name = "DDI E TC2 IO", 3929 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS, 3930 .ops = &hsw_power_well_ops, 3931 .id = DISP_PW_ID_NONE, 3932 { 3933 .hsw.regs = &icl_ddi_power_well_regs, 3934 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 3935 }, 3936 }, 3937 { 3938 .name = "DDI F TC3 IO", 3939 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS, 3940 .ops = &hsw_power_well_ops, 3941 .id = DISP_PW_ID_NONE, 3942 { 3943 .hsw.regs = &icl_ddi_power_well_regs, 3944 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 3945 }, 3946 }, 3947 { 3948 .name = "DDI G TC4 IO", 3949 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS, 3950 .ops = &hsw_power_well_ops, 3951 .id = DISP_PW_ID_NONE, 3952 { 3953 .hsw.regs = 
&icl_ddi_power_well_regs, 3954 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 3955 }, 3956 }, 3957 { 3958 .name = "DDI H TC5 IO", 3959 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS, 3960 .ops = &hsw_power_well_ops, 3961 .id = DISP_PW_ID_NONE, 3962 { 3963 .hsw.regs = &icl_ddi_power_well_regs, 3964 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, 3965 }, 3966 }, 3967 { 3968 .name = "DDI I TC6 IO", 3969 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS, 3970 .ops = &hsw_power_well_ops, 3971 .id = DISP_PW_ID_NONE, 3972 { 3973 .hsw.regs = &icl_ddi_power_well_regs, 3974 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, 3975 }, 3976 }, 3977 { 3978 .name = "AUX A", 3979 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 3980 .ops = &hsw_power_well_ops, 3981 .id = DISP_PW_ID_NONE, 3982 { 3983 .hsw.regs = &icl_aux_power_well_regs, 3984 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3985 }, 3986 }, 3987 { 3988 .name = "AUX B", 3989 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 3990 .ops = &hsw_power_well_ops, 3991 .id = DISP_PW_ID_NONE, 3992 { 3993 .hsw.regs = &icl_aux_power_well_regs, 3994 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3995 }, 3996 }, 3997 { 3998 .name = "AUX C", 3999 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 4000 .ops = &hsw_power_well_ops, 4001 .id = DISP_PW_ID_NONE, 4002 { 4003 .hsw.regs = &icl_aux_power_well_regs, 4004 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 4005 }, 4006 }, 4007 { 4008 .name = "AUX D TC1", 4009 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, 4010 .ops = &icl_tc_phy_aux_power_well_ops, 4011 .id = DISP_PW_ID_NONE, 4012 { 4013 .hsw.regs = &icl_aux_power_well_regs, 4014 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4015 .hsw.is_tc_tbt = false, 4016 }, 4017 }, 4018 { 4019 .name = "AUX E TC2", 4020 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, 4021 .ops = &icl_tc_phy_aux_power_well_ops, 4022 .id = DISP_PW_ID_NONE, 4023 { 4024 .hsw.regs = &icl_aux_power_well_regs, 4025 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4026 .hsw.is_tc_tbt = false, 4027 }, 4028 }, 4029 { 4030 .name = "AUX F TC3", 4031 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS, 4032 .ops = &icl_tc_phy_aux_power_well_ops, 4033 .id = DISP_PW_ID_NONE, 4034 { 4035 .hsw.regs = &icl_aux_power_well_regs, 4036 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 4037 .hsw.is_tc_tbt = false, 4038 }, 4039 }, 4040 { 4041 .name = "AUX G TC4", 4042 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS, 4043 .ops = &icl_tc_phy_aux_power_well_ops, 4044 .id = DISP_PW_ID_NONE, 4045 { 4046 .hsw.regs = &icl_aux_power_well_regs, 4047 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 4048 .hsw.is_tc_tbt = false, 4049 }, 4050 }, 4051 { 4052 .name = "AUX H TC5", 4053 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS, 4054 .ops = &icl_tc_phy_aux_power_well_ops, 4055 .id = DISP_PW_ID_NONE, 4056 { 4057 .hsw.regs = &icl_aux_power_well_regs, 4058 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, 4059 .hsw.is_tc_tbt = false, 4060 }, 4061 }, 4062 { 4063 .name = "AUX I TC6", 4064 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS, 4065 .ops = &icl_tc_phy_aux_power_well_ops, 4066 .id = DISP_PW_ID_NONE, 4067 { 4068 .hsw.regs = &icl_aux_power_well_regs, 4069 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, 4070 .hsw.is_tc_tbt = false, 4071 }, 4072 }, 4073 { 4074 .name = "AUX D TBT1", 4075 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS, 4076 .ops = &hsw_power_well_ops, 4077 .id = DISP_PW_ID_NONE, 4078 { 4079 .hsw.regs = &icl_aux_power_well_regs, 4080 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 4081 .hsw.is_tc_tbt = true, 4082 }, 4083 }, 4084 { 4085 .name = "AUX E TBT2", 4086 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS, 4087 .ops = &hsw_power_well_ops, 4088 .id = DISP_PW_ID_NONE, 4089 { 4090 .hsw.regs = &icl_aux_power_well_regs, 4091 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 4092 
.hsw.is_tc_tbt = true, 4093 }, 4094 }, 4095 { 4096 .name = "AUX F TBT3", 4097 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS, 4098 .ops = &hsw_power_well_ops, 4099 .id = DISP_PW_ID_NONE, 4100 { 4101 .hsw.regs = &icl_aux_power_well_regs, 4102 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 4103 .hsw.is_tc_tbt = true, 4104 }, 4105 }, 4106 { 4107 .name = "AUX G TBT4", 4108 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS, 4109 .ops = &hsw_power_well_ops, 4110 .id = DISP_PW_ID_NONE, 4111 { 4112 .hsw.regs = &icl_aux_power_well_regs, 4113 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 4114 .hsw.is_tc_tbt = true, 4115 }, 4116 }, 4117 { 4118 .name = "AUX H TBT5", 4119 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS, 4120 .ops = &hsw_power_well_ops, 4121 .id = DISP_PW_ID_NONE, 4122 { 4123 .hsw.regs = &icl_aux_power_well_regs, 4124 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, 4125 .hsw.is_tc_tbt = true, 4126 }, 4127 }, 4128 { 4129 .name = "AUX I TBT6", 4130 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS, 4131 .ops = &hsw_power_well_ops, 4132 .id = DISP_PW_ID_NONE, 4133 { 4134 .hsw.regs = &icl_aux_power_well_regs, 4135 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, 4136 .hsw.is_tc_tbt = true, 4137 }, 4138 }, 4139 { 4140 .name = "power well 4", 4141 .domains = TGL_PW_4_POWER_DOMAINS, 4142 .ops = &hsw_power_well_ops, 4143 .id = DISP_PW_ID_NONE, 4144 { 4145 .hsw.regs = &hsw_power_well_regs, 4146 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4147 .hsw.has_fuses = true, 4148 .hsw.irq_pipe_mask = BIT(PIPE_C), 4149 } 4150 }, 4151 { 4152 .name = "power well 5", 4153 .domains = TGL_PW_5_POWER_DOMAINS, 4154 .ops = &hsw_power_well_ops, 4155 .id = DISP_PW_ID_NONE, 4156 { 4157 .hsw.regs = &hsw_power_well_regs, 4158 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4159 .hsw.has_fuses = true, 4160 .hsw.irq_pipe_mask = BIT(PIPE_D), 4161 }, 4162 }, 4163 }; 4164 4165 static int 4166 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 4167 int disable_power_well) 4168 { 4169 if (disable_power_well >= 0) 4170 return !!disable_power_well; 4171 4172 return 1; 4173 } 4174 4175 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, 4176 int enable_dc) 4177 { 4178 u32 mask; 4179 int requested_dc; 4180 int max_dc; 4181 4182 if (INTEL_GEN(dev_priv) >= 12) { 4183 max_dc = 4; 4184 /* 4185 * DC9 has a separate HW flow from the rest of the DC states, 4186 * not depending on the DMC firmware. It's needed by system 4187 * suspend/resume, so allow it unconditionally. 
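 *
 * A hedged illustration of the mapping computed below (assuming power
 * well support is left enabled, so max_dc is not forced to 0): on a
 * gen12 platform, enable_dc=2 yields
 *
 *	mask == DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC6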
4188 */ 4189 mask = DC_STATE_EN_DC9; 4190 } else if (IS_GEN(dev_priv, 11)) { 4191 max_dc = 2; 4192 mask = DC_STATE_EN_DC9; 4193 } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) { 4194 max_dc = 2; 4195 mask = 0; 4196 } else if (IS_GEN9_LP(dev_priv)) { 4197 max_dc = 1; 4198 mask = DC_STATE_EN_DC9; 4199 } else { 4200 max_dc = 0; 4201 mask = 0; 4202 } 4203 4204 if (!i915_modparams.disable_power_well) 4205 max_dc = 0; 4206 4207 if (enable_dc >= 0 && enable_dc <= max_dc) { 4208 requested_dc = enable_dc; 4209 } else if (enable_dc == -1) { 4210 requested_dc = max_dc; 4211 } else if (enable_dc > max_dc && enable_dc <= 4) { 4212 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n", 4213 enable_dc, max_dc); 4214 requested_dc = max_dc; 4215 } else { 4216 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc); 4217 requested_dc = max_dc; 4218 } 4219 4220 switch (requested_dc) { 4221 case 4: 4222 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; 4223 break; 4224 case 3: 4225 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; 4226 break; 4227 case 2: 4228 mask |= DC_STATE_EN_UPTO_DC6; 4229 break; 4230 case 1: 4231 mask |= DC_STATE_EN_UPTO_DC5; 4232 break; 4233 } 4234 4235 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask); 4236 4237 return mask; 4238 } 4239 4240 static int 4241 __set_power_wells(struct i915_power_domains *power_domains, 4242 const struct i915_power_well_desc *power_well_descs, 4243 int power_well_count) 4244 { 4245 u64 power_well_ids = 0; 4246 int i; 4247 4248 power_domains->power_well_count = power_well_count; 4249 power_domains->power_wells = 4250 kcalloc(power_well_count, 4251 sizeof(*power_domains->power_wells), 4252 GFP_KERNEL); 4253 if (!power_domains->power_wells) 4254 return -ENOMEM; 4255 4256 for (i = 0; i < power_well_count; i++) { 4257 enum i915_power_well_id id = power_well_descs[i].id; 4258 4259 power_domains->power_wells[i].desc = &power_well_descs[i]; 4260 4261 if (id == DISP_PW_ID_NONE) 4262 continue; 4263 4264 WARN_ON(id >= sizeof(power_well_ids) * 8); 4265 WARN_ON(power_well_ids & BIT_ULL(id)); 4266 power_well_ids |= BIT_ULL(id); 4267 } 4268 4269 return 0; 4270 } 4271 4272 #define set_power_wells(power_domains, __power_well_descs) \ 4273 __set_power_wells(power_domains, __power_well_descs, \ 4274 ARRAY_SIZE(__power_well_descs)) 4275 4276 /** 4277 * intel_power_domains_init - initializes the power domain structures 4278 * @dev_priv: i915 device instance 4279 * 4280 * Initializes the power domain structures for @dev_priv depending upon the 4281 * supported platform. 4282 */ 4283 int intel_power_domains_init(struct drm_i915_private *dev_priv) 4284 { 4285 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4286 int err; 4287 4288 i915_modparams.disable_power_well = 4289 sanitize_disable_power_well_option(dev_priv, 4290 i915_modparams.disable_power_well); 4291 dev_priv->csr.allowed_dc_mask = 4292 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc); 4293 4294 dev_priv->csr.target_dc_state = 4295 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 4296 4297 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); 4298 4299 mutex_init(&power_domains->lock); 4300 4301 INIT_DELAYED_WORK(&power_domains->async_put_work, 4302 intel_display_power_put_async_work); 4303 4304 /* 4305 * The enabling order will be from lower to higher indexed wells, 4306 * the disabling order is reversed. 
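 *
 * Each platform branch below hands its descriptor table to the
 * set_power_wells() helper defined above, which expands to
 * __set_power_wells(power_domains, descs, ARRAY_SIZE(descs)), e.g.:
 *
 *	err = set_power_wells(power_domains, tgl_power_wells);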
4307 */
4308 if (IS_GEN(dev_priv, 12)) {
4309 err = set_power_wells(power_domains, tgl_power_wells);
4310 } else if (IS_ELKHARTLAKE(dev_priv)) {
4311 err = set_power_wells(power_domains, ehl_power_wells);
4312 } else if (IS_GEN(dev_priv, 11)) {
4313 err = set_power_wells(power_domains, icl_power_wells);
4314 } else if (IS_CANNONLAKE(dev_priv)) {
4315 err = set_power_wells(power_domains, cnl_power_wells);
4316
4317 /*
4318 * DDI and AUX IO power wells are enabled for all ports
4319 * regardless of their presence or use. So, in order to avoid
4320 * timeouts, let's remove them from the list
4321 * for the SKUs without port F.
4322 */
4323 if (!IS_CNL_WITH_PORT_F(dev_priv))
4324 power_domains->power_well_count -= 2;
4325 } else if (IS_GEMINILAKE(dev_priv)) {
4326 err = set_power_wells(power_domains, glk_power_wells);
4327 } else if (IS_BROXTON(dev_priv)) {
4328 err = set_power_wells(power_domains, bxt_power_wells);
4329 } else if (IS_GEN9_BC(dev_priv)) {
4330 err = set_power_wells(power_domains, skl_power_wells);
4331 } else if (IS_CHERRYVIEW(dev_priv)) {
4332 err = set_power_wells(power_domains, chv_power_wells);
4333 } else if (IS_BROADWELL(dev_priv)) {
4334 err = set_power_wells(power_domains, bdw_power_wells);
4335 } else if (IS_HASWELL(dev_priv)) {
4336 err = set_power_wells(power_domains, hsw_power_wells);
4337 } else if (IS_VALLEYVIEW(dev_priv)) {
4338 err = set_power_wells(power_domains, vlv_power_wells);
4339 } else if (IS_I830(dev_priv)) {
4340 err = set_power_wells(power_domains, i830_power_wells);
4341 } else {
4342 err = set_power_wells(power_domains, i9xx_always_on_power_well);
4343 }
4344
4345 return err;
4346 }
4347
4348 /**
4349 * intel_power_domains_cleanup - clean up power domains resources
4350 * @dev_priv: i915 device instance
4351 *
4352 * Release any resources acquired by intel_power_domains_init()
4353 */
4354 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4355 {
4356 kfree(dev_priv->power_domains.power_wells);
4357 }
4358
4359 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4360 {
4361 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4362 struct i915_power_well *power_well;
4363
4364 mutex_lock(&power_domains->lock);
4365 for_each_power_well(dev_priv, power_well) {
4366 power_well->desc->ops->sync_hw(dev_priv, power_well);
4367 power_well->hw_enabled =
4368 power_well->desc->ops->is_enabled(dev_priv, power_well);
4369 }
4370 mutex_unlock(&power_domains->lock);
4371 }
4372
4373 static inline
4374 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4375 i915_reg_t reg, bool enable)
4376 {
4377 u32 val, status;
4378
4379 val = I915_READ(reg);
4380 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4381 I915_WRITE(reg, val);
4382 POSTING_READ(reg);
4383 udelay(10);
4384
4385 status = I915_READ(reg) & DBUF_POWER_STATE;
4386 if ((enable && !status) || (!enable && status)) {
4387 DRM_ERROR("DBuf power %s timeout!\n",
4388 enable ?
"enable" : "disable"); 4389 return false; 4390 } 4391 return true; 4392 } 4393 4394 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) 4395 { 4396 intel_dbuf_slice_set(dev_priv, DBUF_CTL, true); 4397 } 4398 4399 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) 4400 { 4401 intel_dbuf_slice_set(dev_priv, DBUF_CTL, false); 4402 } 4403 4404 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv) 4405 { 4406 if (INTEL_GEN(dev_priv) < 11) 4407 return 1; 4408 return 2; 4409 } 4410 4411 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, 4412 u8 req_slices) 4413 { 4414 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 4415 bool ret; 4416 4417 if (req_slices > intel_dbuf_max_slices(dev_priv)) { 4418 DRM_ERROR("Invalid number of dbuf slices requested\n"); 4419 return; 4420 } 4421 4422 if (req_slices == hw_enabled_slices || req_slices == 0) 4423 return; 4424 4425 if (req_slices > hw_enabled_slices) 4426 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); 4427 else 4428 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false); 4429 4430 if (ret) 4431 dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices; 4432 } 4433 4434 static void icl_dbuf_enable(struct drm_i915_private *dev_priv) 4435 { 4436 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST); 4437 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST); 4438 POSTING_READ(DBUF_CTL_S2); 4439 4440 udelay(10); 4441 4442 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || 4443 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) 4444 DRM_ERROR("DBuf power enable timeout\n"); 4445 else 4446 /* 4447 * FIXME: for now pretend that we only have 1 slice, see 4448 * intel_enabled_dbuf_slices_num(). 4449 */ 4450 dev_priv->wm.skl_hw.ddb.enabled_slices = 1; 4451 } 4452 4453 static void icl_dbuf_disable(struct drm_i915_private *dev_priv) 4454 { 4455 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST); 4456 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST); 4457 POSTING_READ(DBUF_CTL_S2); 4458 4459 udelay(10); 4460 4461 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || 4462 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) 4463 DRM_ERROR("DBuf power disable timeout!\n"); 4464 else 4465 /* 4466 * FIXME: for now pretend that the first slice is always 4467 * enabled, see intel_enabled_dbuf_slices_num(). 4468 */ 4469 dev_priv->wm.skl_hw.ddb.enabled_slices = 1; 4470 } 4471 4472 static void icl_mbus_init(struct drm_i915_private *dev_priv) 4473 { 4474 u32 val; 4475 4476 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 4477 MBUS_ABOX_BT_CREDIT_POOL2(16) | 4478 MBUS_ABOX_B_CREDIT(1) | 4479 MBUS_ABOX_BW_CREDIT(1); 4480 4481 I915_WRITE(MBUS_ABOX_CTL, val); 4482 } 4483 4484 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 4485 { 4486 u32 val = I915_READ(LCPLL_CTL); 4487 4488 /* 4489 * The LCPLL register should be turned on by the BIOS. For now 4490 * let's just check its state and print errors in case 4491 * something is wrong. Don't even try to turn it on. 
4492 */ 4493 4494 if (val & LCPLL_CD_SOURCE_FCLK) 4495 DRM_ERROR("CDCLK source is not LCPLL\n"); 4496 4497 if (val & LCPLL_PLL_DISABLE) 4498 DRM_ERROR("LCPLL is disabled\n"); 4499 4500 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 4501 DRM_ERROR("LCPLL not using non-SSC reference\n"); 4502 } 4503 4504 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 4505 { 4506 struct drm_device *dev = &dev_priv->drm; 4507 struct intel_crtc *crtc; 4508 4509 for_each_intel_crtc(dev, crtc) 4510 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 4511 pipe_name(crtc->pipe)); 4512 4513 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2), 4514 "Display power well on\n"); 4515 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, 4516 "SPLL enabled\n"); 4517 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 4518 "WRPLL1 enabled\n"); 4519 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 4520 "WRPLL2 enabled\n"); 4521 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, 4522 "Panel power on\n"); 4523 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 4524 "CPU PWM1 enabled\n"); 4525 if (IS_HASWELL(dev_priv)) 4526 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 4527 "CPU PWM2 enabled\n"); 4528 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 4529 "PCH PWM1 enabled\n"); 4530 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 4531 "Utility pin enabled\n"); 4532 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, 4533 "PCH GTC enabled\n"); 4534 4535 /* 4536 * In theory we can still leave IRQs enabled, as long as only the HPD 4537 * interrupts remain enabled. We used to check for that, but since it's 4538 * gen-specific and since we only disable LCPLL after we fully disable 4539 * the interrupts, the check below should be enough. 4540 */ 4541 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 4542 } 4543 4544 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 4545 { 4546 if (IS_HASWELL(dev_priv)) 4547 return I915_READ(D_COMP_HSW); 4548 else 4549 return I915_READ(D_COMP_BDW); 4550 } 4551 4552 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 4553 { 4554 if (IS_HASWELL(dev_priv)) { 4555 if (sandybridge_pcode_write(dev_priv, 4556 GEN6_PCODE_WRITE_D_COMP, val)) 4557 DRM_DEBUG_KMS("Failed to write to D_COMP\n"); 4558 } else { 4559 I915_WRITE(D_COMP_BDW, val); 4560 POSTING_READ(D_COMP_BDW); 4561 } 4562 } 4563 4564 /* 4565 * This function implements pieces of two sequences from BSpec: 4566 * - Sequence for display software to disable LCPLL 4567 * - Sequence for display software to allow package C8+ 4568 * The steps implemented here are just the steps that actually touch the LCPLL 4569 * register. Callers should take care of disabling all the display engine 4570 * functions, doing the mode unset, fixing interrupts, etc. 
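 *
 * A sketch of the in-tree usage (see hsw_enable_pc8() further below):
 *
 *	lpt_disable_clkout_dp(dev_priv);
 *	hsw_disable_lcpll(dev_priv, true, true);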
4571 */
4572 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4573 bool switch_to_fclk, bool allow_power_down)
4574 {
4575 u32 val;
4576
4577 assert_can_disable_lcpll(dev_priv);
4578
4579 val = I915_READ(LCPLL_CTL);
4580
4581 if (switch_to_fclk) {
4582 val |= LCPLL_CD_SOURCE_FCLK;
4583 I915_WRITE(LCPLL_CTL, val);
4584
4585 if (wait_for_us(I915_READ(LCPLL_CTL) &
4586 LCPLL_CD_SOURCE_FCLK_DONE, 1))
4587 DRM_ERROR("Switching to FCLK failed\n");
4588
4589 val = I915_READ(LCPLL_CTL);
4590 }
4591
4592 val |= LCPLL_PLL_DISABLE;
4593 I915_WRITE(LCPLL_CTL, val);
4594 POSTING_READ(LCPLL_CTL);
4595
4596 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4597 DRM_ERROR("LCPLL still locked\n");
4598
4599 val = hsw_read_dcomp(dev_priv);
4600 val |= D_COMP_COMP_DISABLE;
4601 hsw_write_dcomp(dev_priv, val);
4602 ndelay(100);
4603
4604 if (wait_for((hsw_read_dcomp(dev_priv) &
4605 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4606 DRM_ERROR("D_COMP RCOMP still in progress\n");
4607
4608 if (allow_power_down) {
4609 val = I915_READ(LCPLL_CTL);
4610 val |= LCPLL_POWER_DOWN_ALLOW;
4611 I915_WRITE(LCPLL_CTL, val);
4612 POSTING_READ(LCPLL_CTL);
4613 }
4614 }
4615
4616 /*
4617 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4618 * source.
4619 */
4620 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4621 {
4622 u32 val;
4623
4624 val = I915_READ(LCPLL_CTL);
4625
4626 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4627 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4628 return;
4629
4630 /*
4631 * Make sure we're not in PC8 state before disabling PC8, otherwise
4632 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4633 */
4634 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4635
4636 if (val & LCPLL_POWER_DOWN_ALLOW) {
4637 val &= ~LCPLL_POWER_DOWN_ALLOW;
4638 I915_WRITE(LCPLL_CTL, val);
4639 POSTING_READ(LCPLL_CTL);
4640 }
4641
4642 val = hsw_read_dcomp(dev_priv);
4643 val |= D_COMP_COMP_FORCE;
4644 val &= ~D_COMP_COMP_DISABLE;
4645 hsw_write_dcomp(dev_priv, val);
4646
4647 val = I915_READ(LCPLL_CTL);
4648 val &= ~LCPLL_PLL_DISABLE;
4649 I915_WRITE(LCPLL_CTL, val);
4650
4651 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4652 DRM_ERROR("LCPLL not locked yet\n");
4653
4654 if (val & LCPLL_CD_SOURCE_FCLK) {
4655 val = I915_READ(LCPLL_CTL);
4656 val &= ~LCPLL_CD_SOURCE_FCLK;
4657 I915_WRITE(LCPLL_CTL, val);
4658
4659 if (wait_for_us((I915_READ(LCPLL_CTL) &
4660 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4661 DRM_ERROR("Switching back to LCPLL failed\n");
4662 }
4663
4664 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4665
4666 intel_update_cdclk(dev_priv);
4667 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
4668 }
4669
4670 /*
4671 * Package states C8 and deeper are really deep PC states that can only be
4672 * reached when all the devices on the system allow it, so even if the graphics
4673 * device allows PC8+, it doesn't mean the system will actually get to these
4674 * states. Our driver only allows PC8+ when going into runtime PM.
4675 *
4676 * The requirements for PC8+ are that all the outputs are disabled, the power
4677 * well is disabled and most interrupts are disabled, and these are also
4678 * requirements for runtime PM. When these conditions are met, we manually do
4679 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4680 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
4681 * hang the machine.
4682 *
4683 * When we really reach PC8 or deeper states (not just when we allow it), we lose
4684 * the state of some registers, so when we come back from PC8+ we need to
4685 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4686 * need to take care of the registers kept by RC6. Notice that this happens even
4687 * if we don't put the device in PCI D3 state (which is what currently happens
4688 * because of the runtime PM support).
4689 *
4690 * For more, read "Display Sequences for Package C8" in the hardware
4691 * documentation.
4692 */
4693 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4694 {
4695 u32 val;
4696
4697 DRM_DEBUG_KMS("Enabling package C8+\n");
4698
4699 if (HAS_PCH_LPT_LP(dev_priv)) {
4700 val = I915_READ(SOUTH_DSPCLK_GATE_D);
4701 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4702 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4703 }
4704
4705 lpt_disable_clkout_dp(dev_priv);
4706 hsw_disable_lcpll(dev_priv, true, true);
4707 }
4708
4709 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4710 {
4711 u32 val;
4712
4713 DRM_DEBUG_KMS("Disabling package C8+\n");
4714
4715 hsw_restore_lcpll(dev_priv);
4716 intel_init_pch_refclk(dev_priv);
4717
4718 if (HAS_PCH_LPT_LP(dev_priv)) {
4719 val = I915_READ(SOUTH_DSPCLK_GATE_D);
4720 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4721 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4722 }
4723 }
4724
4725 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4726 bool enable)
4727 {
4728 i915_reg_t reg;
4729 u32 reset_bits, val;
4730
4731 if (IS_IVYBRIDGE(dev_priv)) {
4732 reg = GEN7_MSG_CTL;
4733 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4734 } else {
4735 reg = HSW_NDE_RSTWRN_OPT;
4736 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4737 }
4738
4739 val = I915_READ(reg);
4740
4741 if (enable)
4742 val |= reset_bits;
4743 else
4744 val &= ~reset_bits;
4745
4746 I915_WRITE(reg, val);
4747 }
4748
4749 static void skl_display_core_init(struct drm_i915_private *dev_priv,
4750 bool resume)
4751 {
4752 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4753 struct i915_power_well *well;
4754
4755 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4756
4757 /* enable PCH reset handshake */
4758 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4759
4760 /* enable PG1 and Misc I/O */
4761 mutex_lock(&power_domains->lock);
4762
4763 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4764 intel_power_well_enable(dev_priv, well);
4765
4766 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
4767 intel_power_well_enable(dev_priv, well);
4768
4769 mutex_unlock(&power_domains->lock);
4770
4771 intel_cdclk_init(dev_priv);
4772
4773 gen9_dbuf_enable(dev_priv);
4774
4775 if (resume && dev_priv->csr.dmc_payload)
4776 intel_csr_load_program(dev_priv);
4777 }
4778
4779 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
4780 {
4781 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4782 struct i915_power_well *well;
4783
4784 gen9_disable_dc_states(dev_priv);
4785
4786 gen9_dbuf_disable(dev_priv);
4787
4788 intel_cdclk_uninit(dev_priv);
4789
4790 /* The spec doesn't call for removing the reset handshake flag */
4791 /* disable PG1 and Misc I/O */
4792
4793 mutex_lock(&power_domains->lock);
4794
4795 /*
4796 * BSpec says to keep the MISC IO power well enabled here, only
4797 * remove our request for power well 1.
4798 * Note that even though the driver's request is removed, power well 1
4799 * may stay enabled after this due to DMC's own request on it.
4800 */
4801 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4802 intel_power_well_disable(dev_priv, well);
4803
4804 mutex_unlock(&power_domains->lock);
4805
4806 usleep_range(10, 30); /* 10 us delay per Bspec */
4807 }
4808
4809 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4810 {
4811 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4812 struct i915_power_well *well;
4813
4814 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4815
4816 /*
4817 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
4818 * or else the reset will hang because there is no PCH to respond.
4819 * Move the handshake programming to the initialization sequence;
4820 * previously it was left up to the BIOS.
4821 */
4822 intel_pch_reset_handshake(dev_priv, false);
4823
4824 /* Enable PG1 */
4825 mutex_lock(&power_domains->lock);
4826
4827 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4828 intel_power_well_enable(dev_priv, well);
4829
4830 mutex_unlock(&power_domains->lock);
4831
4832 intel_cdclk_init(dev_priv);
4833
4834 gen9_dbuf_enable(dev_priv);
4835
4836 if (resume && dev_priv->csr.dmc_payload)
4837 intel_csr_load_program(dev_priv);
4838 }
4839
4840 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4841 {
4842 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4843 struct i915_power_well *well;
4844
4845 gen9_disable_dc_states(dev_priv);
4846
4847 gen9_dbuf_disable(dev_priv);
4848
4849 intel_cdclk_uninit(dev_priv);
4850
4851 /* The spec doesn't call for removing the reset handshake flag */
4852
4853 /*
4854 * Disable PW1 (PG1).
4855 * Note that even though the driver's request is removed, power well 1
4856 * may stay enabled after this due to DMC's own request on it.
4857 */
4858 mutex_lock(&power_domains->lock);
4859
4860 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4861 intel_power_well_disable(dev_priv, well);
4862
4863 mutex_unlock(&power_domains->lock);
4864
4865 usleep_range(10, 30); /* 10 us delay per Bspec */
4866 }
4867
4868 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4869 {
4870 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4871 struct i915_power_well *well;
4872
4873 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4874
4875 /* 1. Enable PCH Reset Handshake */
4876 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4877
4878 /* 2-3. */
4879 intel_combo_phy_init(dev_priv);
4880
4881 /*
4882 * 4. Enable Power Well 1 (PG1).
4883 * The AUX IO power wells will be enabled on demand.
4884 */
4885 mutex_lock(&power_domains->lock);
4886 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4887 intel_power_well_enable(dev_priv, well);
4888 mutex_unlock(&power_domains->lock);
4889
4890 /* 5. Enable CD clock */
4891 intel_cdclk_init(dev_priv);
4892
4893 /* 6. Enable DBUF */
4894 gen9_dbuf_enable(dev_priv);
4895
4896 if (resume && dev_priv->csr.dmc_payload)
4897 intel_csr_load_program(dev_priv);
4898 }
4899
4900 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
4901 {
4902 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4903 struct i915_power_well *well;
4904
4905 gen9_disable_dc_states(dev_priv);
4906
4907 /* 1. Disable all display engine functions -> already done */
4908
4909 /* 2. Disable DBUF */
4910 gen9_dbuf_disable(dev_priv);
4911
4912 /* 3. Disable CD clock */
4913 intel_cdclk_uninit(dev_priv);
4914
4915 /*
4916 * 4. Disable Power Well 1 (PG1).
4917 * The AUX IO power wells are toggled on demand, so they are already
4918 * disabled at this point.
4919 */
4920 mutex_lock(&power_domains->lock);
4921 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4922 intel_power_well_disable(dev_priv, well);
4923 mutex_unlock(&power_domains->lock);
4924
4925 usleep_range(10, 30); /* 10 us delay per Bspec */
4926
4927 /* 5. */
4928 intel_combo_phy_uninit(dev_priv);
4929 }
4930
4931 static void icl_display_core_init(struct drm_i915_private *dev_priv,
4932 bool resume)
4933 {
4934 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4935 struct i915_power_well *well;
4936
4937 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4938
4939 /* 1. Enable PCH reset handshake. */
4940 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4941
4942 /* 2. Initialize all combo phys */
4943 intel_combo_phy_init(dev_priv);
4944
4945 /*
4946 * 3. Enable Power Well 1 (PG1).
4947 * The AUX IO power wells will be enabled on demand.
4948 */
4949 mutex_lock(&power_domains->lock);
4950 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4951 intel_power_well_enable(dev_priv, well);
4952 mutex_unlock(&power_domains->lock);
4953
4954 /* 4. Enable CDCLK. */
4955 intel_cdclk_init(dev_priv);
4956
4957 /* 5. Enable DBUF. */
4958 icl_dbuf_enable(dev_priv);
4959
4960 /* 6. Setup MBUS. */
4961 icl_mbus_init(dev_priv);
4962
4963 if (resume && dev_priv->csr.dmc_payload)
4964 intel_csr_load_program(dev_priv);
4965 }
4966
4967 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
4968 {
4969 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4970 struct i915_power_well *well;
4971
4972 gen9_disable_dc_states(dev_priv);
4973
4974 /* 1. Disable all display engine functions -> already done */
4975
4976 /* 2. Disable DBUF */
4977 icl_dbuf_disable(dev_priv);
4978
4979 /* 3. Disable CD clock */
4980 intel_cdclk_uninit(dev_priv);
4981
4982 /*
4983 * 4. Disable Power Well 1 (PG1).
4984 * The AUX IO power wells are toggled on demand, so they are already
4985 * disabled at this point.
4986 */
4987 mutex_lock(&power_domains->lock);
4988 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4989 intel_power_well_disable(dev_priv, well);
4990 mutex_unlock(&power_domains->lock);
4991
4992 /* 5. */
4993 intel_combo_phy_uninit(dev_priv);
4994 }
4995
4996 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
4997 {
4998 struct i915_power_well *cmn_bc =
4999 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5000 struct i915_power_well *cmn_d =
5001 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5002
5003 /*
5004 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5005 * workaround, never ever read DISPLAY_PHY_CONTROL, and
5006 * instead maintain a shadow copy ourselves. Use the actual
5007 * power well state and lane status to reconstruct the
5008 * expected initial value.
5009 */
5010 dev_priv->chv_phy_control =
5011 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5012 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5013 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5014 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5015 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5016
5017 /*
5018 * If all lanes are disabled, we leave the override disabled
5019 * with all power down bits cleared to match the state we
5020 * would use after disabling the port. Otherwise enable the
5021 * override and set the lane powerdown bits according to the
5022 * current lane status.
5023 */
5024 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5025 u32 status = I915_READ(DPLL(PIPE_A));
5026 unsigned int mask;
5027
5028 mask = status & DPLL_PORTB_READY_MASK;
5029 if (mask == 0xf)
5030 mask = 0x0;
5031 else
5032 dev_priv->chv_phy_control |=
5033 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5034
5035 dev_priv->chv_phy_control |=
5036 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5037
5038 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5039 if (mask == 0xf)
5040 mask = 0x0;
5041 else
5042 dev_priv->chv_phy_control |=
5043 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5044
5045 dev_priv->chv_phy_control |=
5046 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5047
5048 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5049
5050 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5051 } else {
5052 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5053 }
5054
5055 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5056 u32 status = I915_READ(DPIO_PHY_STATUS);
5057 unsigned int mask;
5058
5059 mask = status & DPLL_PORTD_READY_MASK;
5060
5061 if (mask == 0xf)
5062 mask = 0x0;
5063 else
5064 dev_priv->chv_phy_control |=
5065 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5066
5067 dev_priv->chv_phy_control |=
5068 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
5069
5070 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
5071
5072 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
5073 } else {
5074 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
5075 }
5076
5077 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
5078
5079 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
5080 dev_priv->chv_phy_control);
5081 }
5082
5083 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5084 {
5085 struct i915_power_well *cmn =
5086 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5087 struct i915_power_well *disp2d =
5088 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5089
5090 /* If the display might already be active, skip this */
5091 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5092 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5093 I915_READ(DPIO_CTL) & DPIO_CMNRST)
5094 return;
5095
5096 DRM_DEBUG_KMS("toggling display PHY side reset\n");
5097
5098 /* cmnlane needs DPLL registers */
5099 disp2d->desc->ops->enable(dev_priv, disp2d);
5100
5101 /*
5102 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5103 * Need to assert and de-assert PHY SB reset by gating the
5104 * common lane power, then un-gating it.
5105 * Simply ungating isn't enough to reset the PHY enough to get
5106 * ports and lanes running.
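 *
 * The disable call below therefore gates the common lane power; the
 * normal power well code ungates it again on demand, completing the
 * assert/de-assert toggle described above.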
5107 */
5108 cmn->desc->ops->disable(dev_priv, cmn);
5109 }
5110
5111 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5112 {
5113 bool ret;
5114
5115 vlv_punit_get(dev_priv);
5116 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
5117 vlv_punit_put(dev_priv);
5118
5119 return ret;
5120 }
5121
5122 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
5123 {
5124 WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
5125 "VED not power gated\n");
5126 }
5127
5128 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
5129 {
5130 static const struct pci_device_id isp_ids[] = {
5131 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
5132 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
5133 {}
5134 };
5135
5136 WARN(!pci_dev_present(isp_ids) &&
5137 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
5138 "ISP not power gated\n");
5139 }
5140
5141 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
5142
5143 /**
5144 * intel_power_domains_init_hw - initialize hardware power domain state
5145 * @i915: i915 device instance
5146 * @resume: Called from resume code paths or not
5147 *
5148 * This function initializes the hardware power domain state and enables all
5149 * power wells belonging to the INIT power domain. Power wells in other
5150 * domains (and not in the INIT domain) are referenced or disabled by
5151 * intel_modeset_readout_hw_state(). After that the reference count of each
5152 * power well must match its HW enabled state, see
5153 * intel_power_domains_verify_state().
5154 *
5155 * It will return with power domains disabled (to be enabled later by
5156 * intel_power_domains_enable()) and must be paired with
5157 * intel_power_domains_driver_remove().
5158 */
5159 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
5160 {
5161 struct i915_power_domains *power_domains = &i915->power_domains;
5162
5163 power_domains->initializing = true;
5164
5165 /* Must happen before power domain init on VLV/CHV */
5166 intel_update_rawclk(i915);
5167
5168 if (INTEL_GEN(i915) >= 11) {
5169 icl_display_core_init(i915, resume);
5170 } else if (IS_CANNONLAKE(i915)) {
5171 cnl_display_core_init(i915, resume);
5172 } else if (IS_GEN9_BC(i915)) {
5173 skl_display_core_init(i915, resume);
5174 } else if (IS_GEN9_LP(i915)) {
5175 bxt_display_core_init(i915, resume);
5176 } else if (IS_CHERRYVIEW(i915)) {
5177 mutex_lock(&power_domains->lock);
5178 chv_phy_control_init(i915);
5179 mutex_unlock(&power_domains->lock);
5180 assert_isp_power_gated(i915);
5181 } else if (IS_VALLEYVIEW(i915)) {
5182 mutex_lock(&power_domains->lock);
5183 vlv_cmnlane_wa(i915);
5184 mutex_unlock(&power_domains->lock);
5185 assert_ved_power_gated(i915);
5186 assert_isp_power_gated(i915);
5187 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
5188 hsw_assert_cdclk(i915);
5189 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5190 } else if (IS_IVYBRIDGE(i915)) {
5191 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5192 }
5193
5194 /*
5195 * Keep all power wells enabled for any dependent HW access during
5196 * initialization and to make sure we keep BIOS enabled display HW
5197 * resources powered until display HW readout is complete. We drop
5198 * this reference in intel_power_domains_enable().
5199 */
5200 power_domains->wakeref =
5201 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5202
5203 /* Disable power well support if the user asked for it.
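 * (A hedged note: i915.disable_power_well=0 makes the driver hold an
 * extra POWER_DOMAIN_INIT reference here, which is dropped again in
 * intel_power_domains_driver_remove().)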
 */
5204 if (!i915_modparams.disable_power_well)
5205 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5206 intel_power_domains_sync_hw(i915);
5207
5208 power_domains->initializing = false;
5209 }
5210
5211 /**
5212 * intel_power_domains_driver_remove - deinitialize hw power domain state
5213 * @i915: i915 device instance
5214 *
5215 * De-initializes the display power domain HW state. It also ensures that the
5216 * device stays powered up so that the driver can be reloaded.
5217 *
5218 * It must be called with power domains already disabled (after a call to
5219 * intel_power_domains_disable()) and must be paired with
5220 * intel_power_domains_init_hw().
5221 */
5222 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
5223 {
5224 intel_wakeref_t wakeref __maybe_unused =
5225 fetch_and_zero(&i915->power_domains.wakeref);
5226
5227 /* Remove the refcount we took to keep power well support disabled. */
5228 if (!i915_modparams.disable_power_well)
5229 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5230
5231 intel_display_power_flush_work_sync(i915);
5232
5233 intel_power_domains_verify_state(i915);
5234
5235 /* Keep the power well enabled, but cancel its rpm wakeref. */
5236 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5237 }
5238
5239 /**
5240 * intel_power_domains_enable - enable toggling of display power wells
5241 * @i915: i915 device instance
5242 *
5243 * Enable the on-demand enabling/disabling of the display power wells. Note that
5244 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
5245 * only at specific points of the display modeset sequence, thus they are not
5246 * affected by the intel_power_domains_enable()/disable() calls. The purpose
5247 * of these functions is to keep the rest of the power wells enabled until the end
5248 * of display HW readout (which will acquire the power references reflecting
5249 * the current HW state).
5250 */
5251 void intel_power_domains_enable(struct drm_i915_private *i915)
5252 {
5253 intel_wakeref_t wakeref __maybe_unused =
5254 fetch_and_zero(&i915->power_domains.wakeref);
5255
5256 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5257 intel_power_domains_verify_state(i915);
5258 }
5259
5260 /**
5261 * intel_power_domains_disable - disable toggling of display power wells
5262 * @i915: i915 device instance
5263 *
5264 * Disable the on-demand enabling/disabling of the display power wells. See
5265 * intel_power_domains_enable() for which power wells this call controls.
5266 */
5267 void intel_power_domains_disable(struct drm_i915_private *i915)
5268 {
5269 struct i915_power_domains *power_domains = &i915->power_domains;
5270
5271 WARN_ON(power_domains->wakeref);
5272 power_domains->wakeref =
5273 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5274
5275 intel_power_domains_verify_state(i915);
5276 }
5277
5278 /**
5279 * intel_power_domains_suspend - suspend power domain state
5280 * @i915: i915 device instance
5281 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5282 *
5283 * This function prepares the hardware power domain state before entering
5284 * system suspend.
5285 *
5286 * It must be called with power domains already disabled (after a call to
5287 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
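 *
 * A hedged sketch of the expected ordering around system suspend (the
 * suspend mode value is illustrative):
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *	...
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);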
5288 */
5289 void intel_power_domains_suspend(struct drm_i915_private *i915,
5290 enum i915_drm_suspend_mode suspend_mode)
5291 {
5292 struct i915_power_domains *power_domains = &i915->power_domains;
5293 intel_wakeref_t wakeref __maybe_unused =
5294 fetch_and_zero(&power_domains->wakeref);
5295
5296 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5297
5298 /*
5299 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
5300 * support, don't manually deinit the power domains. This also means the
5301 * CSR/DMC firmware will stay active, it will power down any HW
5302 * resources as required and also enable deeper system power states
5303 * that would be blocked if the firmware was inactive.
5304 */
5305 if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
5306 suspend_mode == I915_DRM_SUSPEND_IDLE &&
5307 i915->csr.dmc_payload) {
5308 intel_display_power_flush_work(i915);
5309 intel_power_domains_verify_state(i915);
5310 return;
5311 }
5312
5313 /*
5314 * Even if power well support was disabled we still want to disable
5315 * power wells if power domains must be deinitialized for suspend.
5316 */
5317 if (!i915_modparams.disable_power_well)
5318 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5319
5320 intel_display_power_flush_work(i915);
5321 intel_power_domains_verify_state(i915);
5322
5323 if (INTEL_GEN(i915) >= 11)
5324 icl_display_core_uninit(i915);
5325 else if (IS_CANNONLAKE(i915))
5326 cnl_display_core_uninit(i915);
5327 else if (IS_GEN9_BC(i915))
5328 skl_display_core_uninit(i915);
5329 else if (IS_GEN9_LP(i915))
5330 bxt_display_core_uninit(i915);
5331
5332 power_domains->display_core_suspended = true;
5333 }
5334
5335 /**
5336 * intel_power_domains_resume - resume power domain state
5337 * @i915: i915 device instance
5338 *
5339 * This function resumes the hardware power domain state during system resume.
5340 *
5341 * It will return with power domain support disabled (to be enabled later by
5342 * intel_power_domains_enable()) and must be paired with
5343 * intel_power_domains_suspend().
5344 */
5345 void intel_power_domains_resume(struct drm_i915_private *i915)
5346 {
5347 struct i915_power_domains *power_domains = &i915->power_domains;
5348
5349 if (power_domains->display_core_suspended) {
5350 intel_power_domains_init_hw(i915, true);
5351 power_domains->display_core_suspended = false;
5352 } else {
5353 WARN_ON(power_domains->wakeref);
5354 power_domains->wakeref =
5355 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5356 }
5357
5358 intel_power_domains_verify_state(i915);
5359 }
5360
5361 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5362
5363 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5364 {
5365 struct i915_power_domains *power_domains = &i915->power_domains;
5366 struct i915_power_well *power_well;
5367
5368 for_each_power_well(i915, power_well) {
5369 enum intel_display_power_domain domain;
5370
5371 DRM_DEBUG_DRIVER("%-25s %d\n",
5372 power_well->desc->name, power_well->count);
5373
5374 for_each_power_domain(domain, power_well->desc->domains)
5375 DRM_DEBUG_DRIVER(" %-23s %d\n",
5376 intel_display_power_domain_str(domain),
5377 power_domains->domain_use_count[domain]);
5378 }
5379 }
5380
5381 /**
5382 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5383 * @i915: i915 device instance
5384 *
5385 * Verify that the reference count of each power well matches its HW enabled
5386 * state and the total refcount of the domains it belongs to. This must be
5387 * called after modeset HW state sanitization, which is responsible for
5388 * acquiring reference counts for any power wells in use and disabling the
5389 * ones left on by BIOS but not required by any active output.
5390 */
5391 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5392 {
5393 struct i915_power_domains *power_domains = &i915->power_domains;
5394 struct i915_power_well *power_well;
5395 bool dump_domain_info;
5396
5397 mutex_lock(&power_domains->lock);
5398
5399 verify_async_put_domains_state(power_domains);
5400
5401 dump_domain_info = false;
5402 for_each_power_well(i915, power_well) {
5403 enum intel_display_power_domain domain;
5404 int domains_count;
5405 bool enabled;
5406
5407 enabled = power_well->desc->ops->is_enabled(i915, power_well);
5408 if ((power_well->count || power_well->desc->always_on) !=
5409 enabled)
5410 DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)\n",
5411 power_well->desc->name,
5412 power_well->count, enabled);
5413
5414 domains_count = 0;
5415 for_each_power_domain(domain, power_well->desc->domains)
5416 domains_count += power_domains->domain_use_count[domain];
5417
5418 if (power_well->count != domains_count) {
5419 DRM_ERROR("power well %s refcount/domain refcount mismatch "
5420 "(refcount %d/domains refcount %d)\n",
5421 power_well->desc->name, power_well->count,
5422 domains_count);
5423 dump_domain_info = true;
5424 }
5425 }
5426
5427 if (dump_domain_info) {
5428 static bool dumped;
5429
5430 if (!dumped) {
5431 intel_power_domains_dump_info(i915);
5432 dumped = true;
5433 }
5434 }
5435
5436 mutex_unlock(&power_domains->lock);
5437 }
5438
5439 #else
5440
5441 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5442 {
5443 }
5444
5445 #endif
5446
5447 void intel_display_power_suspend_late(struct drm_i915_private *i915)
5448 {
5449 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
5450 bxt_enable_dc9(i915);
5451 else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
5452 hsw_enable_pc8(i915);
5453 }
5454
5455 void intel_display_power_resume_early(struct drm_i915_private *i915)
5456 {
5457 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
5458 gen9_sanitize_dc_state(i915);
5459 bxt_disable_dc9(i915);
5460 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5461 hsw_disable_pc8(i915);
5462 }
5463 }
5464
5465 void intel_display_power_suspend(struct drm_i915_private *i915)
5466 {
5467 if (INTEL_GEN(i915) >= 11) {
5468 icl_display_core_uninit(i915);
5469 bxt_enable_dc9(i915);
5470 } else if (IS_GEN9_LP(i915)) {
5471 bxt_display_core_uninit(i915);
5472 bxt_enable_dc9(i915);
5473 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5474 hsw_enable_pc8(i915);
5475 }
5476 }
5477
5478 void intel_display_power_resume(struct drm_i915_private *i915)
5479 {
5480 if (INTEL_GEN(i915) >= 11) {
5481 bxt_disable_dc9(i915);
5482 icl_display_core_init(i915, true);
5483 if (i915->csr.dmc_payload) {
5484 if (i915->csr.allowed_dc_mask &
5485 DC_STATE_EN_UPTO_DC6)
5486 skl_enable_dc6(i915);
5487 else if (i915->csr.allowed_dc_mask &
5488 DC_STATE_EN_UPTO_DC5)
5489 gen9_enable_dc5(i915);
5490 }
5491 } else if (IS_GEN9_LP(i915)) {
5492 bxt_disable_dc9(i915);
5493 bxt_display_core_init(i915, true);
5494 if (i915->csr.dmc_payload &&
5495 (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5496 gen9_enable_dc5(i915);
5497 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5498 hsw_disable_pc8(i915);
5499 }
5500 }
5501