1 /* SPDX-License-Identifier: MIT */ 2 /* 3 * Copyright © 2019 Intel Corporation 4 */ 5 6 #include "display/intel_crt.h" 7 #include "display/intel_dp.h" 8 9 #include "i915_drv.h" 10 #include "i915_irq.h" 11 #include "intel_cdclk.h" 12 #include "intel_combo_phy.h" 13 #include "intel_csr.h" 14 #include "intel_display_power.h" 15 #include "intel_display_types.h" 16 #include "intel_dpio_phy.h" 17 #include "intel_hotplug.h" 18 #include "intel_sideband.h" 19 #include "intel_tc.h" 20 #include "intel_vga.h" 21 22 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 23 enum i915_power_well_id power_well_id); 24 25 const char * 26 intel_display_power_domain_str(enum intel_display_power_domain domain) 27 { 28 switch (domain) { 29 case POWER_DOMAIN_DISPLAY_CORE: 30 return "DISPLAY_CORE"; 31 case POWER_DOMAIN_PIPE_A: 32 return "PIPE_A"; 33 case POWER_DOMAIN_PIPE_B: 34 return "PIPE_B"; 35 case POWER_DOMAIN_PIPE_C: 36 return "PIPE_C"; 37 case POWER_DOMAIN_PIPE_D: 38 return "PIPE_D"; 39 case POWER_DOMAIN_PIPE_A_PANEL_FITTER: 40 return "PIPE_A_PANEL_FITTER"; 41 case POWER_DOMAIN_PIPE_B_PANEL_FITTER: 42 return "PIPE_B_PANEL_FITTER"; 43 case POWER_DOMAIN_PIPE_C_PANEL_FITTER: 44 return "PIPE_C_PANEL_FITTER"; 45 case POWER_DOMAIN_PIPE_D_PANEL_FITTER: 46 return "PIPE_D_PANEL_FITTER"; 47 case POWER_DOMAIN_TRANSCODER_A: 48 return "TRANSCODER_A"; 49 case POWER_DOMAIN_TRANSCODER_B: 50 return "TRANSCODER_B"; 51 case POWER_DOMAIN_TRANSCODER_C: 52 return "TRANSCODER_C"; 53 case POWER_DOMAIN_TRANSCODER_D: 54 return "TRANSCODER_D"; 55 case POWER_DOMAIN_TRANSCODER_EDP: 56 return "TRANSCODER_EDP"; 57 case POWER_DOMAIN_TRANSCODER_VDSC_PW2: 58 return "TRANSCODER_VDSC_PW2"; 59 case POWER_DOMAIN_TRANSCODER_DSI_A: 60 return "TRANSCODER_DSI_A"; 61 case POWER_DOMAIN_TRANSCODER_DSI_C: 62 return "TRANSCODER_DSI_C"; 63 case POWER_DOMAIN_PORT_DDI_A_LANES: 64 return "PORT_DDI_A_LANES"; 65 case POWER_DOMAIN_PORT_DDI_B_LANES: 66 return "PORT_DDI_B_LANES"; 67 case POWER_DOMAIN_PORT_DDI_C_LANES: 68 return "PORT_DDI_C_LANES"; 69 case POWER_DOMAIN_PORT_DDI_D_LANES: 70 return "PORT_DDI_D_LANES"; 71 case POWER_DOMAIN_PORT_DDI_E_LANES: 72 return "PORT_DDI_E_LANES"; 73 case POWER_DOMAIN_PORT_DDI_F_LANES: 74 return "PORT_DDI_F_LANES"; 75 case POWER_DOMAIN_PORT_DDI_G_LANES: 76 return "PORT_DDI_G_LANES"; 77 case POWER_DOMAIN_PORT_DDI_H_LANES: 78 return "PORT_DDI_H_LANES"; 79 case POWER_DOMAIN_PORT_DDI_I_LANES: 80 return "PORT_DDI_I_LANES"; 81 case POWER_DOMAIN_PORT_DDI_A_IO: 82 return "PORT_DDI_A_IO"; 83 case POWER_DOMAIN_PORT_DDI_B_IO: 84 return "PORT_DDI_B_IO"; 85 case POWER_DOMAIN_PORT_DDI_C_IO: 86 return "PORT_DDI_C_IO"; 87 case POWER_DOMAIN_PORT_DDI_D_IO: 88 return "PORT_DDI_D_IO"; 89 case POWER_DOMAIN_PORT_DDI_E_IO: 90 return "PORT_DDI_E_IO"; 91 case POWER_DOMAIN_PORT_DDI_F_IO: 92 return "PORT_DDI_F_IO"; 93 case POWER_DOMAIN_PORT_DDI_G_IO: 94 return "PORT_DDI_G_IO"; 95 case POWER_DOMAIN_PORT_DDI_H_IO: 96 return "PORT_DDI_H_IO"; 97 case POWER_DOMAIN_PORT_DDI_I_IO: 98 return "PORT_DDI_I_IO"; 99 case POWER_DOMAIN_PORT_DSI: 100 return "PORT_DSI"; 101 case POWER_DOMAIN_PORT_CRT: 102 return "PORT_CRT"; 103 case POWER_DOMAIN_PORT_OTHER: 104 return "PORT_OTHER"; 105 case POWER_DOMAIN_VGA: 106 return "VGA"; 107 case POWER_DOMAIN_AUDIO: 108 return "AUDIO"; 109 case POWER_DOMAIN_AUX_A: 110 return "AUX_A"; 111 case POWER_DOMAIN_AUX_B: 112 return "AUX_B"; 113 case POWER_DOMAIN_AUX_C: 114 return "AUX_C"; 115 case POWER_DOMAIN_AUX_D: 116 return "AUX_D"; 117 case POWER_DOMAIN_AUX_E: 118 return "AUX_E"; 119 case POWER_DOMAIN_AUX_F: 120 return "AUX_F"; 121 case POWER_DOMAIN_AUX_G: 122 return "AUX_G"; 123 case POWER_DOMAIN_AUX_H: 124 return "AUX_H"; 125 case POWER_DOMAIN_AUX_I: 126 return "AUX_I"; 127 case POWER_DOMAIN_AUX_IO_A: 128 return "AUX_IO_A"; 129 case POWER_DOMAIN_AUX_C_TBT: 130 return "AUX_C_TBT"; 131 case POWER_DOMAIN_AUX_D_TBT: 132 return "AUX_D_TBT"; 133 case POWER_DOMAIN_AUX_E_TBT: 134 return "AUX_E_TBT"; 135 case POWER_DOMAIN_AUX_F_TBT: 136 return "AUX_F_TBT"; 137 case POWER_DOMAIN_AUX_G_TBT: 138 return "AUX_G_TBT"; 139 case POWER_DOMAIN_AUX_H_TBT: 140 return "AUX_H_TBT"; 141 case POWER_DOMAIN_AUX_I_TBT: 142 return "AUX_I_TBT"; 143 case POWER_DOMAIN_GMBUS: 144 return "GMBUS"; 145 case POWER_DOMAIN_INIT: 146 return "INIT"; 147 case POWER_DOMAIN_MODESET: 148 return "MODESET"; 149 case POWER_DOMAIN_GT_IRQ: 150 return "GT_IRQ"; 151 case POWER_DOMAIN_DPLL_DC_OFF: 152 return "DPLL_DC_OFF"; 153 default: 154 MISSING_CASE(domain); 155 return "?"; 156 } 157 } 158 159 static void intel_power_well_enable(struct drm_i915_private *dev_priv, 160 struct i915_power_well *power_well) 161 { 162 DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name); 163 power_well->desc->ops->enable(dev_priv, power_well); 164 power_well->hw_enabled = true; 165 } 166 167 static void intel_power_well_disable(struct drm_i915_private *dev_priv, 168 struct i915_power_well *power_well) 169 { 170 DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name); 171 power_well->hw_enabled = false; 172 power_well->desc->ops->disable(dev_priv, power_well); 173 } 174 175 static void intel_power_well_get(struct drm_i915_private *dev_priv, 176 struct i915_power_well *power_well) 177 { 178 if (!power_well->count++) 179 intel_power_well_enable(dev_priv, power_well); 180 } 181 182 static void intel_power_well_put(struct drm_i915_private *dev_priv, 183 struct i915_power_well *power_well) 184 { 185 WARN(!power_well->count, "Use count on power well %s is already zero", 186 power_well->desc->name); 187 188 if (!--power_well->count) 189 intel_power_well_disable(dev_priv, power_well); 190 } 191 192 /** 193 * __intel_display_power_is_enabled - unlocked check for a power domain 194 * @dev_priv: i915 device instance 195 * @domain: power domain to check 196 * 197 * This is the unlocked version of intel_display_power_is_enabled() and should 198 * only be used from error capture and recovery code where deadlocks are 199 * possible. 200 * 201 * Returns: 202 * True when the power domain is enabled, false otherwise. 203 */ 204 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 205 enum intel_display_power_domain domain) 206 { 207 struct i915_power_well *power_well; 208 bool is_enabled; 209 210 if (dev_priv->runtime_pm.suspended) 211 return false; 212 213 is_enabled = true; 214 215 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) { 216 if (power_well->desc->always_on) 217 continue; 218 219 if (!power_well->hw_enabled) { 220 is_enabled = false; 221 break; 222 } 223 } 224 225 return is_enabled; 226 } 227 228 /** 229 * intel_display_power_is_enabled - check for a power domain 230 * @dev_priv: i915 device instance 231 * @domain: power domain to check 232 * 233 * This function can be used to check the hw power domain state. It is mostly 234 * used in hardware state readout functions. Everywhere else code should rely 235 * upon explicit power domain reference counting to ensure that the hardware 236 * block is powered up before accessing it. 237 * 238 * Callers must hold the relevant modesetting locks to ensure that concurrent 239 * threads can't disable the power well while the caller tries to read a few 240 * registers. 241 * 242 * Returns: 243 * True when the power domain is enabled, false otherwise. 244 */ 245 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 246 enum intel_display_power_domain domain) 247 { 248 struct i915_power_domains *power_domains; 249 bool ret; 250 251 power_domains = &dev_priv->power_domains; 252 253 mutex_lock(&power_domains->lock); 254 ret = __intel_display_power_is_enabled(dev_priv, domain); 255 mutex_unlock(&power_domains->lock); 256 257 return ret; 258 } 259 260 /* 261 * Starting with Haswell, we have a "Power Down Well" that can be turned off 262 * when not needed anymore. We have 4 registers that can request the power well 263 * to be enabled, and it will only be disabled if none of the registers is 264 * requesting it to be enabled. 265 */ 266 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, 267 u8 irq_pipe_mask, bool has_vga) 268 { 269 if (has_vga) 270 intel_vga_reset_io_mem(dev_priv); 271 272 if (irq_pipe_mask) 273 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); 274 } 275 276 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, 277 u8 irq_pipe_mask) 278 { 279 if (irq_pipe_mask) 280 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); 281 } 282 283 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, 284 struct i915_power_well *power_well) 285 { 286 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 287 int pw_idx = power_well->desc->hsw.idx; 288 289 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ 290 if (intel_de_wait_for_set(dev_priv, regs->driver, 291 HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) { 292 DRM_DEBUG_KMS("%s power well enable timeout\n", 293 power_well->desc->name); 294 295 /* An AUX timeout is expected if the TBT DP tunnel is down. */ 296 WARN_ON(!power_well->desc->hsw.is_tc_tbt); 297 } 298 } 299 300 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, 301 const struct i915_power_well_regs *regs, 302 int pw_idx) 303 { 304 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); 305 u32 ret; 306 307 ret = I915_READ(regs->bios) & req_mask ? 1 : 0; 308 ret |= I915_READ(regs->driver) & req_mask ? 2 : 0; 309 if (regs->kvmr.reg) 310 ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0; 311 ret |= I915_READ(regs->debug) & req_mask ? 8 : 0; 312 313 return ret; 314 } 315 316 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, 317 struct i915_power_well *power_well) 318 { 319 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 320 int pw_idx = power_well->desc->hsw.idx; 321 bool disabled; 322 u32 reqs; 323 324 /* 325 * Bspec doesn't require waiting for PWs to get disabled, but still do 326 * this for paranoia. The known cases where a PW will be forced on: 327 * - a KVMR request on any power well via the KVMR request register 328 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and 329 * DEBUG request registers 330 * Skip the wait in case any of the request bits are set and print a 331 * diagnostic message. 332 */ 333 wait_for((disabled = !(I915_READ(regs->driver) & 334 HSW_PWR_WELL_CTL_STATE(pw_idx))) || 335 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); 336 if (disabled) 337 return; 338 339 DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", 340 power_well->desc->name, 341 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); 342 } 343 344 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, 345 enum skl_power_gate pg) 346 { 347 /* Timeout 5us for PG#0, for other PGs 1us */ 348 WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS, 349 SKL_FUSE_PG_DIST_STATUS(pg), 1)); 350 } 351 352 static void hsw_power_well_enable(struct drm_i915_private *dev_priv, 353 struct i915_power_well *power_well) 354 { 355 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 356 int pw_idx = power_well->desc->hsw.idx; 357 bool wait_fuses = power_well->desc->hsw.has_fuses; 358 enum skl_power_gate uninitialized_var(pg); 359 u32 val; 360 361 if (wait_fuses) { 362 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : 363 SKL_PW_CTL_IDX_TO_PG(pw_idx); 364 /* 365 * For PW1 we have to wait both for the PW0/PG0 fuse state 366 * before enabling the power well and PW1/PG1's own fuse 367 * state after the enabling. For all other power wells with 368 * fuses we only have to wait for that PW/PG's fuse state 369 * after the enabling. 370 */ 371 if (pg == SKL_PG1) 372 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); 373 } 374 375 val = I915_READ(regs->driver); 376 I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); 377 hsw_wait_for_power_well_enable(dev_priv, power_well); 378 379 /* Display WA #1178: cnl */ 380 if (IS_CANNONLAKE(dev_priv) && 381 pw_idx >= GLK_PW_CTL_IDX_AUX_B && 382 pw_idx <= CNL_PW_CTL_IDX_AUX_F) { 383 val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx)); 384 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS; 385 I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val); 386 } 387 388 if (wait_fuses) 389 gen9_wait_for_power_well_fuses(dev_priv, pg); 390 391 hsw_power_well_post_enable(dev_priv, 392 power_well->desc->hsw.irq_pipe_mask, 393 power_well->desc->hsw.has_vga); 394 } 395 396 static void hsw_power_well_disable(struct drm_i915_private *dev_priv, 397 struct i915_power_well *power_well) 398 { 399 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 400 int pw_idx = power_well->desc->hsw.idx; 401 u32 val; 402 403 hsw_power_well_pre_disable(dev_priv, 404 power_well->desc->hsw.irq_pipe_mask); 405 406 val = I915_READ(regs->driver); 407 I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); 408 hsw_wait_for_power_well_disable(dev_priv, power_well); 409 } 410 411 #define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) 412 413 static void 414 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, 415 struct i915_power_well *power_well) 416 { 417 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 418 int pw_idx = power_well->desc->hsw.idx; 419 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); 420 u32 val; 421 int wa_idx_max; 422 423 val = I915_READ(regs->driver); 424 I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); 425 426 if (INTEL_GEN(dev_priv) < 12) { 427 val = I915_READ(ICL_PORT_CL_DW12(phy)); 428 I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX); 429 } 430 431 hsw_wait_for_power_well_enable(dev_priv, power_well); 432 433 /* Display WA #1178: icl, tgl */ 434 if (IS_TIGERLAKE(dev_priv)) 435 wa_idx_max = ICL_PW_CTL_IDX_AUX_C; 436 else 437 wa_idx_max = ICL_PW_CTL_IDX_AUX_B; 438 439 if (!IS_ELKHARTLAKE(dev_priv) && 440 pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max && 441 !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { 442 val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx)); 443 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; 444 I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val); 445 } 446 } 447 448 static void 449 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, 450 struct i915_power_well *power_well) 451 { 452 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 453 int pw_idx = power_well->desc->hsw.idx; 454 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); 455 u32 val; 456 457 if (INTEL_GEN(dev_priv) < 12) { 458 val = I915_READ(ICL_PORT_CL_DW12(phy)); 459 I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX); 460 } 461 462 val = I915_READ(regs->driver); 463 I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); 464 465 hsw_wait_for_power_well_disable(dev_priv, power_well); 466 } 467 468 #define ICL_AUX_PW_TO_CH(pw_idx) \ 469 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) 470 471 #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ 472 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) 473 474 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv, 475 struct i915_power_well *power_well) 476 { 477 int pw_idx = power_well->desc->hsw.idx; 478 479 return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) : 480 ICL_AUX_PW_TO_CH(pw_idx); 481 } 482 483 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 484 485 static u64 async_put_domains_mask(struct i915_power_domains *power_domains); 486 487 static int power_well_async_ref_count(struct drm_i915_private *dev_priv, 488 struct i915_power_well *power_well) 489 { 490 int refs = hweight64(power_well->desc->domains & 491 async_put_domains_mask(&dev_priv->power_domains)); 492 493 WARN_ON(refs > power_well->count); 494 495 return refs; 496 } 497 498 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, 499 struct i915_power_well *power_well) 500 { 501 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); 502 struct intel_digital_port *dig_port = NULL; 503 struct intel_encoder *encoder; 504 505 /* Bypass the check if all references are released asynchronously */ 506 if (power_well_async_ref_count(dev_priv, power_well) == 507 power_well->count) 508 return; 509 510 aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); 511 512 for_each_intel_encoder(&dev_priv->drm, encoder) { 513 enum phy phy = intel_port_to_phy(dev_priv, encoder->port); 514 515 if (!intel_phy_is_tc(dev_priv, phy)) 516 continue; 517 518 /* We'll check the MST primary port */ 519 if (encoder->type == INTEL_OUTPUT_DP_MST) 520 continue; 521 522 dig_port = enc_to_dig_port(&encoder->base); 523 if (WARN_ON(!dig_port)) 524 continue; 525 526 if (dig_port->aux_ch != aux_ch) { 527 dig_port = NULL; 528 continue; 529 } 530 531 break; 532 } 533 534 if (WARN_ON(!dig_port)) 535 return; 536 537 WARN_ON(!intel_tc_port_ref_held(dig_port)); 538 } 539 540 #else 541 542 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, 543 struct i915_power_well *power_well) 544 { 545 } 546 547 #endif 548 549 #define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) 550 551 static void 552 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, 553 struct i915_power_well *power_well) 554 { 555 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); 556 u32 val; 557 558 icl_tc_port_assert_ref_held(dev_priv, power_well); 559 560 val = I915_READ(DP_AUX_CH_CTL(aux_ch)); 561 val &= ~DP_AUX_CH_CTL_TBT_IO; 562 if (power_well->desc->hsw.is_tc_tbt) 563 val |= DP_AUX_CH_CTL_TBT_IO; 564 I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); 565 566 hsw_power_well_enable(dev_priv, power_well); 567 568 if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) { 569 enum tc_port tc_port; 570 571 tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx); 572 I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2)); 573 574 if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), 575 DKL_CMN_UC_DW27_UC_HEALTH, 1)) 576 DRM_WARN("Timeout waiting TC uC health\n"); 577 } 578 } 579 580 static void 581 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, 582 struct i915_power_well *power_well) 583 { 584 icl_tc_port_assert_ref_held(dev_priv, power_well); 585 586 hsw_power_well_disable(dev_priv, power_well); 587 } 588 589 /* 590 * We should only use the power well if we explicitly asked the hardware to 591 * enable it, so check if it's enabled and also check if we've requested it to 592 * be enabled. 593 */ 594 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, 595 struct i915_power_well *power_well) 596 { 597 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 598 enum i915_power_well_id id = power_well->desc->id; 599 int pw_idx = power_well->desc->hsw.idx; 600 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | 601 HSW_PWR_WELL_CTL_STATE(pw_idx); 602 u32 val; 603 604 val = I915_READ(regs->driver); 605 606 /* 607 * On GEN9 big core due to a DMC bug the driver's request bits for PW1 608 * and the MISC_IO PW will be not restored, so check instead for the 609 * BIOS's own request bits, which are forced-on for these power wells 610 * when exiting DC5/6. 611 */ 612 if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) && 613 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) 614 val |= I915_READ(regs->bios); 615 616 return (val & mask) == mask; 617 } 618 619 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) 620 { 621 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), 622 "DC9 already programmed to be enabled.\n"); 623 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, 624 "DC5 still not disabled to enable DC9.\n"); 625 WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) & 626 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), 627 "Power well 2 on.\n"); 628 WARN_ONCE(intel_irqs_enabled(dev_priv), 629 "Interrupts not disabled yet.\n"); 630 631 /* 632 * TODO: check for the following to verify the conditions to enter DC9 633 * state are satisfied: 634 * 1] Check relevant display engine registers to verify if mode set 635 * disable sequence was followed. 636 * 2] Check if display uninitialize sequence is initialized. 637 */ 638 } 639 640 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) 641 { 642 WARN_ONCE(intel_irqs_enabled(dev_priv), 643 "Interrupts not disabled yet.\n"); 644 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, 645 "DC5 still not disabled.\n"); 646 647 /* 648 * TODO: check for the following to verify DC9 state was indeed 649 * entered before programming to disable it: 650 * 1] Check relevant display engine registers to verify if mode 651 * set disable sequence was followed. 652 * 2] Check if display uninitialize sequence is initialized. 653 */ 654 } 655 656 static void gen9_write_dc_state(struct drm_i915_private *dev_priv, 657 u32 state) 658 { 659 int rewrites = 0; 660 int rereads = 0; 661 u32 v; 662 663 I915_WRITE(DC_STATE_EN, state); 664 665 /* It has been observed that disabling the dc6 state sometimes 666 * doesn't stick and dmc keeps returning old value. Make sure 667 * the write really sticks enough times and also force rewrite until 668 * we are confident that state is exactly what we want. 669 */ 670 do { 671 v = I915_READ(DC_STATE_EN); 672 673 if (v != state) { 674 I915_WRITE(DC_STATE_EN, state); 675 rewrites++; 676 rereads = 0; 677 } else if (rereads++ > 5) { 678 break; 679 } 680 681 } while (rewrites < 100); 682 683 if (v != state) 684 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n", 685 state, v); 686 687 /* Most of the times we need one retry, avoid spam */ 688 if (rewrites > 1) 689 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n", 690 state, rewrites); 691 } 692 693 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) 694 { 695 u32 mask; 696 697 mask = DC_STATE_EN_UPTO_DC5; 698 if (INTEL_GEN(dev_priv) >= 11) 699 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; 700 else if (IS_GEN9_LP(dev_priv)) 701 mask |= DC_STATE_EN_DC9; 702 else 703 mask |= DC_STATE_EN_UPTO_DC6; 704 705 return mask; 706 } 707 708 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) 709 { 710 u32 val; 711 712 val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv); 713 714 DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n", 715 dev_priv->csr.dc_state, val); 716 dev_priv->csr.dc_state = val; 717 } 718 719 /** 720 * gen9_set_dc_state - set target display C power state 721 * @dev_priv: i915 device instance 722 * @state: target DC power state 723 * - DC_STATE_DISABLE 724 * - DC_STATE_EN_UPTO_DC5 725 * - DC_STATE_EN_UPTO_DC6 726 * - DC_STATE_EN_DC9 727 * 728 * Signal to DMC firmware/HW the target DC power state passed in @state. 729 * DMC/HW can turn off individual display clocks and power rails when entering 730 * a deeper DC power state (higher in number) and turns these back when exiting 731 * that state to a shallower power state (lower in number). The HW will decide 732 * when to actually enter a given state on an on-demand basis, for instance 733 * depending on the active state of display pipes. The state of display 734 * registers backed by affected power rails are saved/restored as needed. 735 * 736 * Based on the above enabling a deeper DC power state is asynchronous wrt. 737 * enabling it. Disabling a deeper power state is synchronous: for instance 738 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned 739 * back on and register state is restored. This is guaranteed by the MMIO write 740 * to DC_STATE_EN blocking until the state is restored. 741 */ 742 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) 743 { 744 u32 val; 745 u32 mask; 746 747 if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask)) 748 state &= dev_priv->csr.allowed_dc_mask; 749 750 val = I915_READ(DC_STATE_EN); 751 mask = gen9_dc_mask(dev_priv); 752 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", 753 val & mask, state); 754 755 /* Check if DMC is ignoring our DC state requests */ 756 if ((val & mask) != dev_priv->csr.dc_state) 757 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n", 758 dev_priv->csr.dc_state, val & mask); 759 760 val &= ~mask; 761 val |= state; 762 763 gen9_write_dc_state(dev_priv, val); 764 765 dev_priv->csr.dc_state = val & mask; 766 } 767 768 static void bxt_enable_dc9(struct drm_i915_private *dev_priv) 769 { 770 assert_can_enable_dc9(dev_priv); 771 772 DRM_DEBUG_KMS("Enabling DC9\n"); 773 /* 774 * Power sequencer reset is not needed on 775 * platforms with South Display Engine on PCH, 776 * because PPS registers are always on. 777 */ 778 if (!HAS_PCH_SPLIT(dev_priv)) 779 intel_power_sequencer_reset(dev_priv); 780 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); 781 } 782 783 static void bxt_disable_dc9(struct drm_i915_private *dev_priv) 784 { 785 assert_can_disable_dc9(dev_priv); 786 787 DRM_DEBUG_KMS("Disabling DC9\n"); 788 789 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 790 791 intel_pps_unlock_regs_wa(dev_priv); 792 } 793 794 static void assert_csr_loaded(struct drm_i915_private *dev_priv) 795 { 796 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)), 797 "CSR program storage start is NULL\n"); 798 WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n"); 799 WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n"); 800 } 801 802 static struct i915_power_well * 803 lookup_power_well(struct drm_i915_private *dev_priv, 804 enum i915_power_well_id power_well_id) 805 { 806 struct i915_power_well *power_well; 807 808 for_each_power_well(dev_priv, power_well) 809 if (power_well->desc->id == power_well_id) 810 return power_well; 811 812 /* 813 * It's not feasible to add error checking code to the callers since 814 * this condition really shouldn't happen and it doesn't even make sense 815 * to abort things like display initialization sequences. Just return 816 * the first power well and hope the WARN gets reported so we can fix 817 * our driver. 818 */ 819 WARN(1, "Power well %d not defined for this platform\n", power_well_id); 820 return &dev_priv->power_domains.power_wells[0]; 821 } 822 823 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) 824 { 825 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, 826 SKL_DISP_PW_2); 827 828 WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n"); 829 830 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), 831 "DC5 already programmed to be enabled.\n"); 832 assert_rpm_wakelock_held(&dev_priv->runtime_pm); 833 834 assert_csr_loaded(dev_priv); 835 } 836 837 static void gen9_enable_dc5(struct drm_i915_private *dev_priv) 838 { 839 assert_can_enable_dc5(dev_priv); 840 841 DRM_DEBUG_KMS("Enabling DC5\n"); 842 843 /* Wa Display #1183: skl,kbl,cfl */ 844 if (IS_GEN9_BC(dev_priv)) 845 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | 846 SKL_SELECT_ALTERNATE_DC_EXIT); 847 848 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); 849 } 850 851 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) 852 { 853 WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 854 "Backlight is not disabled.\n"); 855 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), 856 "DC6 already programmed to be enabled.\n"); 857 858 assert_csr_loaded(dev_priv); 859 } 860 861 static void skl_enable_dc6(struct drm_i915_private *dev_priv) 862 { 863 assert_can_enable_dc6(dev_priv); 864 865 DRM_DEBUG_KMS("Enabling DC6\n"); 866 867 /* Wa Display #1183: skl,kbl,cfl */ 868 if (IS_GEN9_BC(dev_priv)) 869 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | 870 SKL_SELECT_ALTERNATE_DC_EXIT); 871 872 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 873 } 874 875 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, 876 struct i915_power_well *power_well) 877 { 878 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 879 int pw_idx = power_well->desc->hsw.idx; 880 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); 881 u32 bios_req = I915_READ(regs->bios); 882 883 /* Take over the request bit if set by BIOS. */ 884 if (bios_req & mask) { 885 u32 drv_req = I915_READ(regs->driver); 886 887 if (!(drv_req & mask)) 888 I915_WRITE(regs->driver, drv_req | mask); 889 I915_WRITE(regs->bios, bios_req & ~mask); 890 } 891 } 892 893 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 894 struct i915_power_well *power_well) 895 { 896 bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy); 897 } 898 899 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 900 struct i915_power_well *power_well) 901 { 902 bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy); 903 } 904 905 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, 906 struct i915_power_well *power_well) 907 { 908 return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy); 909 } 910 911 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) 912 { 913 struct i915_power_well *power_well; 914 915 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); 916 if (power_well->count > 0) 917 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); 918 919 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 920 if (power_well->count > 0) 921 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); 922 923 if (IS_GEMINILAKE(dev_priv)) { 924 power_well = lookup_power_well(dev_priv, 925 GLK_DISP_PW_DPIO_CMN_C); 926 if (power_well->count > 0) 927 bxt_ddi_phy_verify_state(dev_priv, 928 power_well->desc->bxt.phy); 929 } 930 } 931 932 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, 933 struct i915_power_well *power_well) 934 { 935 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0; 936 } 937 938 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) 939 { 940 u32 tmp = I915_READ(DBUF_CTL); 941 942 WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) != 943 (DBUF_POWER_STATE | DBUF_POWER_REQUEST), 944 "Unexpected DBuf power power state (0x%08x)\n", tmp); 945 } 946 947 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv) 948 { 949 struct intel_cdclk_state cdclk_state = {}; 950 951 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 952 953 dev_priv->display.get_cdclk(dev_priv, &cdclk_state); 954 /* Can't read out voltage_level so can't use intel_cdclk_changed() */ 955 WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state)); 956 957 gen9_assert_dbuf_enabled(dev_priv); 958 959 if (IS_GEN9_LP(dev_priv)) 960 bxt_verify_ddi_phy_power_wells(dev_priv); 961 962 if (INTEL_GEN(dev_priv) >= 11) 963 /* 964 * DMC retains HW context only for port A, the other combo 965 * PHY's HW context for port B is lost after DC transitions, 966 * so we need to restore it manually. 967 */ 968 intel_combo_phy_init(dev_priv); 969 } 970 971 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, 972 struct i915_power_well *power_well) 973 { 974 gen9_disable_dc_states(dev_priv); 975 } 976 977 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, 978 struct i915_power_well *power_well) 979 { 980 if (!dev_priv->csr.dmc_payload) 981 return; 982 983 if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6) 984 skl_enable_dc6(dev_priv); 985 else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) 986 gen9_enable_dc5(dev_priv); 987 } 988 989 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, 990 struct i915_power_well *power_well) 991 { 992 } 993 994 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, 995 struct i915_power_well *power_well) 996 { 997 } 998 999 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, 1000 struct i915_power_well *power_well) 1001 { 1002 return true; 1003 } 1004 1005 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, 1006 struct i915_power_well *power_well) 1007 { 1008 if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) 1009 i830_enable_pipe(dev_priv, PIPE_A); 1010 if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) 1011 i830_enable_pipe(dev_priv, PIPE_B); 1012 } 1013 1014 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, 1015 struct i915_power_well *power_well) 1016 { 1017 i830_disable_pipe(dev_priv, PIPE_B); 1018 i830_disable_pipe(dev_priv, PIPE_A); 1019 } 1020 1021 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, 1022 struct i915_power_well *power_well) 1023 { 1024 return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE && 1025 I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE; 1026 } 1027 1028 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, 1029 struct i915_power_well *power_well) 1030 { 1031 if (power_well->count > 0) 1032 i830_pipes_power_well_enable(dev_priv, power_well); 1033 else 1034 i830_pipes_power_well_disable(dev_priv, power_well); 1035 } 1036 1037 static void vlv_set_power_well(struct drm_i915_private *dev_priv, 1038 struct i915_power_well *power_well, bool enable) 1039 { 1040 int pw_idx = power_well->desc->vlv.idx; 1041 u32 mask; 1042 u32 state; 1043 u32 ctrl; 1044 1045 mask = PUNIT_PWRGT_MASK(pw_idx); 1046 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) : 1047 PUNIT_PWRGT_PWR_GATE(pw_idx); 1048 1049 vlv_punit_get(dev_priv); 1050 1051 #define COND \ 1052 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) 1053 1054 if (COND) 1055 goto out; 1056 1057 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); 1058 ctrl &= ~mask; 1059 ctrl |= state; 1060 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); 1061 1062 if (wait_for(COND, 100)) 1063 DRM_ERROR("timeout setting power well state %08x (%08x)\n", 1064 state, 1065 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); 1066 1067 #undef COND 1068 1069 out: 1070 vlv_punit_put(dev_priv); 1071 } 1072 1073 static void vlv_power_well_enable(struct drm_i915_private *dev_priv, 1074 struct i915_power_well *power_well) 1075 { 1076 vlv_set_power_well(dev_priv, power_well, true); 1077 } 1078 1079 static void vlv_power_well_disable(struct drm_i915_private *dev_priv, 1080 struct i915_power_well *power_well) 1081 { 1082 vlv_set_power_well(dev_priv, power_well, false); 1083 } 1084 1085 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, 1086 struct i915_power_well *power_well) 1087 { 1088 int pw_idx = power_well->desc->vlv.idx; 1089 bool enabled = false; 1090 u32 mask; 1091 u32 state; 1092 u32 ctrl; 1093 1094 mask = PUNIT_PWRGT_MASK(pw_idx); 1095 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); 1096 1097 vlv_punit_get(dev_priv); 1098 1099 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; 1100 /* 1101 * We only ever set the power-on and power-gate states, anything 1102 * else is unexpected. 1103 */ 1104 WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) && 1105 state != PUNIT_PWRGT_PWR_GATE(pw_idx)); 1106 if (state == ctrl) 1107 enabled = true; 1108 1109 /* 1110 * A transient state at this point would mean some unexpected party 1111 * is poking at the power controls too. 1112 */ 1113 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; 1114 WARN_ON(ctrl != state); 1115 1116 vlv_punit_put(dev_priv); 1117 1118 return enabled; 1119 } 1120 1121 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) 1122 { 1123 u32 val; 1124 1125 /* 1126 * On driver load, a pipe may be active and driving a DSI display. 1127 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck 1128 * (and never recovering) in this case. intel_dsi_post_disable() will 1129 * clear it when we turn off the display. 1130 */ 1131 val = I915_READ(DSPCLK_GATE_D); 1132 val &= DPOUNIT_CLOCK_GATE_DISABLE; 1133 val |= VRHUNIT_CLOCK_GATE_DISABLE; 1134 I915_WRITE(DSPCLK_GATE_D, val); 1135 1136 /* 1137 * Disable trickle feed and enable pnd deadline calculation 1138 */ 1139 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 1140 I915_WRITE(CBR1_VLV, 0); 1141 1142 WARN_ON(dev_priv->rawclk_freq == 0); 1143 1144 I915_WRITE(RAWCLK_FREQ_VLV, 1145 DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000)); 1146 } 1147 1148 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) 1149 { 1150 struct intel_encoder *encoder; 1151 enum pipe pipe; 1152 1153 /* 1154 * Enable the CRI clock source so we can get at the 1155 * display and the reference clock for VGA 1156 * hotplug / manual detection. Supposedly DSI also 1157 * needs the ref clock up and running. 1158 * 1159 * CHV DPLL B/C have some issues if VGA mode is enabled. 1160 */ 1161 for_each_pipe(dev_priv, pipe) { 1162 u32 val = I915_READ(DPLL(pipe)); 1163 1164 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1165 if (pipe != PIPE_A) 1166 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1167 1168 I915_WRITE(DPLL(pipe), val); 1169 } 1170 1171 vlv_init_display_clock_gating(dev_priv); 1172 1173 spin_lock_irq(&dev_priv->irq_lock); 1174 valleyview_enable_display_irqs(dev_priv); 1175 spin_unlock_irq(&dev_priv->irq_lock); 1176 1177 /* 1178 * During driver initialization/resume we can avoid restoring the 1179 * part of the HW/SW state that will be inited anyway explicitly. 1180 */ 1181 if (dev_priv->power_domains.initializing) 1182 return; 1183 1184 intel_hpd_init(dev_priv); 1185 1186 /* Re-enable the ADPA, if we have one */ 1187 for_each_intel_encoder(&dev_priv->drm, encoder) { 1188 if (encoder->type == INTEL_OUTPUT_ANALOG) 1189 intel_crt_reset(&encoder->base); 1190 } 1191 1192 intel_vga_redisable_power_on(dev_priv); 1193 1194 intel_pps_unlock_regs_wa(dev_priv); 1195 } 1196 1197 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) 1198 { 1199 spin_lock_irq(&dev_priv->irq_lock); 1200 valleyview_disable_display_irqs(dev_priv); 1201 spin_unlock_irq(&dev_priv->irq_lock); 1202 1203 /* make sure we're done processing display irqs */ 1204 intel_synchronize_irq(dev_priv); 1205 1206 intel_power_sequencer_reset(dev_priv); 1207 1208 /* Prevent us from re-enabling polling on accident in late suspend */ 1209 if (!dev_priv->drm.dev->power.is_suspended) 1210 intel_hpd_poll_init(dev_priv); 1211 } 1212 1213 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, 1214 struct i915_power_well *power_well) 1215 { 1216 vlv_set_power_well(dev_priv, power_well, true); 1217 1218 vlv_display_power_well_init(dev_priv); 1219 } 1220 1221 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, 1222 struct i915_power_well *power_well) 1223 { 1224 vlv_display_power_well_deinit(dev_priv); 1225 1226 vlv_set_power_well(dev_priv, power_well, false); 1227 } 1228 1229 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1230 struct i915_power_well *power_well) 1231 { 1232 /* since ref/cri clock was enabled */ 1233 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1234 1235 vlv_set_power_well(dev_priv, power_well, true); 1236 1237 /* 1238 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - 1239 * 6. De-assert cmn_reset/side_reset. Same as VLV X0. 1240 * a. GUnit 0x2110 bit[0] set to 1 (def 0) 1241 * b. The other bits such as sfr settings / modesel may all 1242 * be set to 0. 1243 * 1244 * This should only be done on init and resume from S3 with 1245 * both PLLs disabled, or we risk losing DPIO and PLL 1246 * synchronization. 1247 */ 1248 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); 1249 } 1250 1251 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1252 struct i915_power_well *power_well) 1253 { 1254 enum pipe pipe; 1255 1256 for_each_pipe(dev_priv, pipe) 1257 assert_pll_disabled(dev_priv, pipe); 1258 1259 /* Assert common reset */ 1260 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST); 1261 1262 vlv_set_power_well(dev_priv, power_well, false); 1263 } 1264 1265 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) 1266 1267 #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) 1268 1269 static void assert_chv_phy_status(struct drm_i915_private *dev_priv) 1270 { 1271 struct i915_power_well *cmn_bc = 1272 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 1273 struct i915_power_well *cmn_d = 1274 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 1275 u32 phy_control = dev_priv->chv_phy_control; 1276 u32 phy_status = 0; 1277 u32 phy_status_mask = 0xffffffff; 1278 1279 /* 1280 * The BIOS can leave the PHY is some weird state 1281 * where it doesn't fully power down some parts. 1282 * Disable the asserts until the PHY has been fully 1283 * reset (ie. the power well has been disabled at 1284 * least once). 1285 */ 1286 if (!dev_priv->chv_phy_assert[DPIO_PHY0]) 1287 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | 1288 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | 1289 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | 1290 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | 1291 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | 1292 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); 1293 1294 if (!dev_priv->chv_phy_assert[DPIO_PHY1]) 1295 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | 1296 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | 1297 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); 1298 1299 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { 1300 phy_status |= PHY_POWERGOOD(DPIO_PHY0); 1301 1302 /* this assumes override is only used to enable lanes */ 1303 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) 1304 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); 1305 1306 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) 1307 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); 1308 1309 /* CL1 is on whenever anything is on in either channel */ 1310 if (BITS_SET(phy_control, 1311 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | 1312 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) 1313 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); 1314 1315 /* 1316 * The DPLLB check accounts for the pipe B + port A usage 1317 * with CL2 powered up but all the lanes in the second channel 1318 * powered down. 1319 */ 1320 if (BITS_SET(phy_control, 1321 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && 1322 (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) 1323 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); 1324 1325 if (BITS_SET(phy_control, 1326 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) 1327 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); 1328 if (BITS_SET(phy_control, 1329 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) 1330 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); 1331 1332 if (BITS_SET(phy_control, 1333 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) 1334 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); 1335 if (BITS_SET(phy_control, 1336 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) 1337 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); 1338 } 1339 1340 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { 1341 phy_status |= PHY_POWERGOOD(DPIO_PHY1); 1342 1343 /* this assumes override is only used to enable lanes */ 1344 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) 1345 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); 1346 1347 if (BITS_SET(phy_control, 1348 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) 1349 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); 1350 1351 if (BITS_SET(phy_control, 1352 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) 1353 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); 1354 if (BITS_SET(phy_control, 1355 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) 1356 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); 1357 } 1358 1359 phy_status &= phy_status_mask; 1360 1361 /* 1362 * The PHY may be busy with some initial calibration and whatnot, 1363 * so the power state can take a while to actually change. 1364 */ 1365 if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS, 1366 phy_status_mask, phy_status, 10)) 1367 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", 1368 I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask, 1369 phy_status, dev_priv->chv_phy_control); 1370 } 1371 1372 #undef BITS_SET 1373 1374 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1375 struct i915_power_well *power_well) 1376 { 1377 enum dpio_phy phy; 1378 enum pipe pipe; 1379 u32 tmp; 1380 1381 WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && 1382 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); 1383 1384 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1385 pipe = PIPE_A; 1386 phy = DPIO_PHY0; 1387 } else { 1388 pipe = PIPE_C; 1389 phy = DPIO_PHY1; 1390 } 1391 1392 /* since ref/cri clock was enabled */ 1393 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1394 vlv_set_power_well(dev_priv, power_well, true); 1395 1396 /* Poll for phypwrgood signal */ 1397 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, 1398 PHY_POWERGOOD(phy), 1)) 1399 DRM_ERROR("Display PHY %d is not power up\n", phy); 1400 1401 vlv_dpio_get(dev_priv); 1402 1403 /* Enable dynamic power down */ 1404 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); 1405 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | 1406 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; 1407 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); 1408 1409 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1410 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); 1411 tmp |= DPIO_DYNPWRDOWNEN_CH1; 1412 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); 1413 } else { 1414 /* 1415 * Force the non-existing CL2 off. BXT does this 1416 * too, so maybe it saves some power even though 1417 * CL2 doesn't exist? 1418 */ 1419 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); 1420 tmp |= DPIO_CL2_LDOFUSE_PWRENB; 1421 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); 1422 } 1423 1424 vlv_dpio_put(dev_priv); 1425 1426 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); 1427 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); 1428 1429 DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1430 phy, dev_priv->chv_phy_control); 1431 1432 assert_chv_phy_status(dev_priv); 1433 } 1434 1435 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1436 struct i915_power_well *power_well) 1437 { 1438 enum dpio_phy phy; 1439 1440 WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && 1441 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); 1442 1443 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1444 phy = DPIO_PHY0; 1445 assert_pll_disabled(dev_priv, PIPE_A); 1446 assert_pll_disabled(dev_priv, PIPE_B); 1447 } else { 1448 phy = DPIO_PHY1; 1449 assert_pll_disabled(dev_priv, PIPE_C); 1450 } 1451 1452 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); 1453 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); 1454 1455 vlv_set_power_well(dev_priv, power_well, false); 1456 1457 DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1458 phy, dev_priv->chv_phy_control); 1459 1460 /* PHY is fully reset now, so we can enable the PHY state asserts */ 1461 dev_priv->chv_phy_assert[phy] = true; 1462 1463 assert_chv_phy_status(dev_priv); 1464 } 1465 1466 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1467 enum dpio_channel ch, bool override, unsigned int mask) 1468 { 1469 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; 1470 u32 reg, val, expected, actual; 1471 1472 /* 1473 * The BIOS can leave the PHY is some weird state 1474 * where it doesn't fully power down some parts. 1475 * Disable the asserts until the PHY has been fully 1476 * reset (ie. the power well has been disabled at 1477 * least once). 1478 */ 1479 if (!dev_priv->chv_phy_assert[phy]) 1480 return; 1481 1482 if (ch == DPIO_CH0) 1483 reg = _CHV_CMN_DW0_CH0; 1484 else 1485 reg = _CHV_CMN_DW6_CH1; 1486 1487 vlv_dpio_get(dev_priv); 1488 val = vlv_dpio_read(dev_priv, pipe, reg); 1489 vlv_dpio_put(dev_priv); 1490 1491 /* 1492 * This assumes !override is only used when the port is disabled. 1493 * All lanes should power down even without the override when 1494 * the port is disabled. 1495 */ 1496 if (!override || mask == 0xf) { 1497 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1498 /* 1499 * If CH1 common lane is not active anymore 1500 * (eg. for pipe B DPLL) the entire channel will 1501 * shut down, which causes the common lane registers 1502 * to read as 0. That means we can't actually check 1503 * the lane power down status bits, but as the entire 1504 * register reads as 0 it's a good indication that the 1505 * channel is indeed entirely powered down. 1506 */ 1507 if (ch == DPIO_CH1 && val == 0) 1508 expected = 0; 1509 } else if (mask != 0x0) { 1510 expected = DPIO_ANYDL_POWERDOWN; 1511 } else { 1512 expected = 0; 1513 } 1514 1515 if (ch == DPIO_CH0) 1516 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; 1517 else 1518 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; 1519 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1520 1521 WARN(actual != expected, 1522 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", 1523 !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN), 1524 !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN), 1525 reg, val); 1526 } 1527 1528 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1529 enum dpio_channel ch, bool override) 1530 { 1531 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1532 bool was_override; 1533 1534 mutex_lock(&power_domains->lock); 1535 1536 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1537 1538 if (override == was_override) 1539 goto out; 1540 1541 if (override) 1542 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1543 else 1544 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1545 1546 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); 1547 1548 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", 1549 phy, ch, dev_priv->chv_phy_control); 1550 1551 assert_chv_phy_status(dev_priv); 1552 1553 out: 1554 mutex_unlock(&power_domains->lock); 1555 1556 return was_override; 1557 } 1558 1559 void chv_phy_powergate_lanes(struct intel_encoder *encoder, 1560 bool override, unsigned int mask) 1561 { 1562 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1563 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1564 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base)); 1565 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); 1566 1567 mutex_lock(&power_domains->lock); 1568 1569 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); 1570 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); 1571 1572 if (override) 1573 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1574 else 1575 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1576 1577 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); 1578 1579 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", 1580 phy, ch, mask, dev_priv->chv_phy_control); 1581 1582 assert_chv_phy_status(dev_priv); 1583 1584 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); 1585 1586 mutex_unlock(&power_domains->lock); 1587 } 1588 1589 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, 1590 struct i915_power_well *power_well) 1591 { 1592 enum pipe pipe = PIPE_A; 1593 bool enabled; 1594 u32 state, ctrl; 1595 1596 vlv_punit_get(dev_priv); 1597 1598 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); 1599 /* 1600 * We only ever set the power-on and power-gate states, anything 1601 * else is unexpected. 1602 */ 1603 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe)); 1604 enabled = state == DP_SSS_PWR_ON(pipe); 1605 1606 /* 1607 * A transient state at this point would mean some unexpected party 1608 * is poking at the power controls too. 1609 */ 1610 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); 1611 WARN_ON(ctrl << 16 != state); 1612 1613 vlv_punit_put(dev_priv); 1614 1615 return enabled; 1616 } 1617 1618 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, 1619 struct i915_power_well *power_well, 1620 bool enable) 1621 { 1622 enum pipe pipe = PIPE_A; 1623 u32 state; 1624 u32 ctrl; 1625 1626 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); 1627 1628 vlv_punit_get(dev_priv); 1629 1630 #define COND \ 1631 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) 1632 1633 if (COND) 1634 goto out; 1635 1636 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 1637 ctrl &= ~DP_SSC_MASK(pipe); 1638 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); 1639 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); 1640 1641 if (wait_for(COND, 100)) 1642 DRM_ERROR("timeout setting power well state %08x (%08x)\n", 1643 state, 1644 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); 1645 1646 #undef COND 1647 1648 out: 1649 vlv_punit_put(dev_priv); 1650 } 1651 1652 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, 1653 struct i915_power_well *power_well) 1654 { 1655 chv_set_pipe_power_well(dev_priv, power_well, true); 1656 1657 vlv_display_power_well_init(dev_priv); 1658 } 1659 1660 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, 1661 struct i915_power_well *power_well) 1662 { 1663 vlv_display_power_well_deinit(dev_priv); 1664 1665 chv_set_pipe_power_well(dev_priv, power_well, false); 1666 } 1667 1668 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) 1669 { 1670 return power_domains->async_put_domains[0] | 1671 power_domains->async_put_domains[1]; 1672 } 1673 1674 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 1675 1676 static bool 1677 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 1678 { 1679 return !WARN_ON(power_domains->async_put_domains[0] & 1680 power_domains->async_put_domains[1]); 1681 } 1682 1683 static bool 1684 __async_put_domains_state_ok(struct i915_power_domains *power_domains) 1685 { 1686 enum intel_display_power_domain domain; 1687 bool err = false; 1688 1689 err |= !assert_async_put_domain_masks_disjoint(power_domains); 1690 err |= WARN_ON(!!power_domains->async_put_wakeref != 1691 !!__async_put_domains_mask(power_domains)); 1692 1693 for_each_power_domain(domain, __async_put_domains_mask(power_domains)) 1694 err |= WARN_ON(power_domains->domain_use_count[domain] != 1); 1695 1696 return !err; 1697 } 1698 1699 static void print_power_domains(struct i915_power_domains *power_domains, 1700 const char *prefix, u64 mask) 1701 { 1702 enum intel_display_power_domain domain; 1703 1704 DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask)); 1705 for_each_power_domain(domain, mask) 1706 DRM_DEBUG_DRIVER("%s use_count %d\n", 1707 intel_display_power_domain_str(domain), 1708 power_domains->domain_use_count[domain]); 1709 } 1710 1711 static void 1712 print_async_put_domains_state(struct i915_power_domains *power_domains) 1713 { 1714 DRM_DEBUG_DRIVER("async_put_wakeref %u\n", 1715 power_domains->async_put_wakeref); 1716 1717 print_power_domains(power_domains, "async_put_domains[0]", 1718 power_domains->async_put_domains[0]); 1719 print_power_domains(power_domains, "async_put_domains[1]", 1720 power_domains->async_put_domains[1]); 1721 } 1722 1723 static void 1724 verify_async_put_domains_state(struct i915_power_domains *power_domains) 1725 { 1726 if (!__async_put_domains_state_ok(power_domains)) 1727 print_async_put_domains_state(power_domains); 1728 } 1729 1730 #else 1731 1732 static void 1733 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 1734 { 1735 } 1736 1737 static void 1738 verify_async_put_domains_state(struct i915_power_domains *power_domains) 1739 { 1740 } 1741 1742 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ 1743 1744 static u64 async_put_domains_mask(struct i915_power_domains *power_domains) 1745 { 1746 assert_async_put_domain_masks_disjoint(power_domains); 1747 1748 return __async_put_domains_mask(power_domains); 1749 } 1750 1751 static void 1752 async_put_domains_clear_domain(struct i915_power_domains *power_domains, 1753 enum intel_display_power_domain domain) 1754 { 1755 assert_async_put_domain_masks_disjoint(power_domains); 1756 1757 power_domains->async_put_domains[0] &= ~BIT_ULL(domain); 1758 power_domains->async_put_domains[1] &= ~BIT_ULL(domain); 1759 } 1760 1761 static bool 1762 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, 1763 enum intel_display_power_domain domain) 1764 { 1765 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1766 bool ret = false; 1767 1768 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) 1769 goto out_verify; 1770 1771 async_put_domains_clear_domain(power_domains, domain); 1772 1773 ret = true; 1774 1775 if (async_put_domains_mask(power_domains)) 1776 goto out_verify; 1777 1778 cancel_delayed_work(&power_domains->async_put_work); 1779 intel_runtime_pm_put_raw(&dev_priv->runtime_pm, 1780 fetch_and_zero(&power_domains->async_put_wakeref)); 1781 out_verify: 1782 verify_async_put_domains_state(power_domains); 1783 1784 return ret; 1785 } 1786 1787 static void 1788 __intel_display_power_get_domain(struct drm_i915_private *dev_priv, 1789 enum intel_display_power_domain domain) 1790 { 1791 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1792 struct i915_power_well *power_well; 1793 1794 if (intel_display_power_grab_async_put_ref(dev_priv, domain)) 1795 return; 1796 1797 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) 1798 intel_power_well_get(dev_priv, power_well); 1799 1800 power_domains->domain_use_count[domain]++; 1801 } 1802 1803 /** 1804 * intel_display_power_get - grab a power domain reference 1805 * @dev_priv: i915 device instance 1806 * @domain: power domain to reference 1807 * 1808 * This function grabs a power domain reference for @domain and ensures that the 1809 * power domain and all its parents are powered up. Therefore users should only 1810 * grab a reference to the innermost power domain they need. 1811 * 1812 * Any power domain reference obtained by this function must have a symmetric 1813 * call to intel_display_power_put() to release the reference again. 1814 */ 1815 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, 1816 enum intel_display_power_domain domain) 1817 { 1818 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1819 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 1820 1821 mutex_lock(&power_domains->lock); 1822 __intel_display_power_get_domain(dev_priv, domain); 1823 mutex_unlock(&power_domains->lock); 1824 1825 return wakeref; 1826 } 1827 1828 /** 1829 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain 1830 * @dev_priv: i915 device instance 1831 * @domain: power domain to reference 1832 * 1833 * This function grabs a power domain reference for @domain and ensures that the 1834 * power domain and all its parents are powered up. Therefore users should only 1835 * grab a reference to the innermost power domain they need. 1836 * 1837 * Any power domain reference obtained by this function must have a symmetric 1838 * call to intel_display_power_put() to release the reference again. 1839 */ 1840 intel_wakeref_t 1841 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, 1842 enum intel_display_power_domain domain) 1843 { 1844 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1845 intel_wakeref_t wakeref; 1846 bool is_enabled; 1847 1848 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); 1849 if (!wakeref) 1850 return false; 1851 1852 mutex_lock(&power_domains->lock); 1853 1854 if (__intel_display_power_is_enabled(dev_priv, domain)) { 1855 __intel_display_power_get_domain(dev_priv, domain); 1856 is_enabled = true; 1857 } else { 1858 is_enabled = false; 1859 } 1860 1861 mutex_unlock(&power_domains->lock); 1862 1863 if (!is_enabled) { 1864 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 1865 wakeref = 0; 1866 } 1867 1868 return wakeref; 1869 } 1870 1871 static void 1872 __intel_display_power_put_domain(struct drm_i915_private *dev_priv, 1873 enum intel_display_power_domain domain) 1874 { 1875 struct i915_power_domains *power_domains; 1876 struct i915_power_well *power_well; 1877 const char *name = intel_display_power_domain_str(domain); 1878 1879 power_domains = &dev_priv->power_domains; 1880 1881 WARN(!power_domains->domain_use_count[domain], 1882 "Use count on domain %s is already zero\n", 1883 name); 1884 WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain), 1885 "Async disabling of domain %s is pending\n", 1886 name); 1887 1888 power_domains->domain_use_count[domain]--; 1889 1890 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) 1891 intel_power_well_put(dev_priv, power_well); 1892 } 1893 1894 static void __intel_display_power_put(struct drm_i915_private *dev_priv, 1895 enum intel_display_power_domain domain) 1896 { 1897 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1898 1899 mutex_lock(&power_domains->lock); 1900 __intel_display_power_put_domain(dev_priv, domain); 1901 mutex_unlock(&power_domains->lock); 1902 } 1903 1904 /** 1905 * intel_display_power_put_unchecked - release an unchecked power domain reference 1906 * @dev_priv: i915 device instance 1907 * @domain: power domain to reference 1908 * 1909 * This function drops the power domain reference obtained by 1910 * intel_display_power_get() and might power down the corresponding hardware 1911 * block right away if this is the last reference. 1912 * 1913 * This function exists only for historical reasons and should be avoided in 1914 * new code, as the correctness of its use cannot be checked. Always use 1915 * intel_display_power_put() instead. 1916 */ 1917 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 1918 enum intel_display_power_domain domain) 1919 { 1920 __intel_display_power_put(dev_priv, domain); 1921 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); 1922 } 1923 1924 static void 1925 queue_async_put_domains_work(struct i915_power_domains *power_domains, 1926 intel_wakeref_t wakeref) 1927 { 1928 WARN_ON(power_domains->async_put_wakeref); 1929 power_domains->async_put_wakeref = wakeref; 1930 WARN_ON(!queue_delayed_work(system_unbound_wq, 1931 &power_domains->async_put_work, 1932 msecs_to_jiffies(100))); 1933 } 1934 1935 static void 1936 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) 1937 { 1938 struct drm_i915_private *dev_priv = 1939 container_of(power_domains, struct drm_i915_private, 1940 power_domains); 1941 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 1942 enum intel_display_power_domain domain; 1943 intel_wakeref_t wakeref; 1944 1945 /* 1946 * The caller must hold already raw wakeref, upgrade that to a proper 1947 * wakeref to make the state checker happy about the HW access during 1948 * power well disabling. 1949 */ 1950 assert_rpm_raw_wakeref_held(rpm); 1951 wakeref = intel_runtime_pm_get(rpm); 1952 1953 for_each_power_domain(domain, mask) { 1954 /* Clear before put, so put's sanity check is happy. */ 1955 async_put_domains_clear_domain(power_domains, domain); 1956 __intel_display_power_put_domain(dev_priv, domain); 1957 } 1958 1959 intel_runtime_pm_put(rpm, wakeref); 1960 } 1961 1962 static void 1963 intel_display_power_put_async_work(struct work_struct *work) 1964 { 1965 struct drm_i915_private *dev_priv = 1966 container_of(work, struct drm_i915_private, 1967 power_domains.async_put_work.work); 1968 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1969 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 1970 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); 1971 intel_wakeref_t old_work_wakeref = 0; 1972 1973 mutex_lock(&power_domains->lock); 1974 1975 /* 1976 * Bail out if all the domain refs pending to be released were grabbed 1977 * by subsequent gets or a flush_work. 1978 */ 1979 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 1980 if (!old_work_wakeref) 1981 goto out_verify; 1982 1983 release_async_put_domains(power_domains, 1984 power_domains->async_put_domains[0]); 1985 1986 /* Requeue the work if more domains were async put meanwhile. */ 1987 if (power_domains->async_put_domains[1]) { 1988 power_domains->async_put_domains[0] = 1989 fetch_and_zero(&power_domains->async_put_domains[1]); 1990 queue_async_put_domains_work(power_domains, 1991 fetch_and_zero(&new_work_wakeref)); 1992 } 1993 1994 out_verify: 1995 verify_async_put_domains_state(power_domains); 1996 1997 mutex_unlock(&power_domains->lock); 1998 1999 if (old_work_wakeref) 2000 intel_runtime_pm_put_raw(rpm, old_work_wakeref); 2001 if (new_work_wakeref) 2002 intel_runtime_pm_put_raw(rpm, new_work_wakeref); 2003 } 2004 2005 /** 2006 * intel_display_power_put_async - release a power domain reference asynchronously 2007 * @i915: i915 device instance 2008 * @domain: power domain to reference 2009 * @wakeref: wakeref acquired for the reference that is being released 2010 * 2011 * This function drops the power domain reference obtained by 2012 * intel_display_power_get*() and schedules a work to power down the 2013 * corresponding hardware block if this is the last reference. 2014 */ 2015 void __intel_display_power_put_async(struct drm_i915_private *i915, 2016 enum intel_display_power_domain domain, 2017 intel_wakeref_t wakeref) 2018 { 2019 struct i915_power_domains *power_domains = &i915->power_domains; 2020 struct intel_runtime_pm *rpm = &i915->runtime_pm; 2021 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); 2022 2023 mutex_lock(&power_domains->lock); 2024 2025 if (power_domains->domain_use_count[domain] > 1) { 2026 __intel_display_power_put_domain(i915, domain); 2027 2028 goto out_verify; 2029 } 2030 2031 WARN_ON(power_domains->domain_use_count[domain] != 1); 2032 2033 /* Let a pending work requeue itself or queue a new one. */ 2034 if (power_domains->async_put_wakeref) { 2035 power_domains->async_put_domains[1] |= BIT_ULL(domain); 2036 } else { 2037 power_domains->async_put_domains[0] |= BIT_ULL(domain); 2038 queue_async_put_domains_work(power_domains, 2039 fetch_and_zero(&work_wakeref)); 2040 } 2041 2042 out_verify: 2043 verify_async_put_domains_state(power_domains); 2044 2045 mutex_unlock(&power_domains->lock); 2046 2047 if (work_wakeref) 2048 intel_runtime_pm_put_raw(rpm, work_wakeref); 2049 2050 intel_runtime_pm_put(rpm, wakeref); 2051 } 2052 2053 /** 2054 * intel_display_power_flush_work - flushes the async display power disabling work 2055 * @i915: i915 device instance 2056 * 2057 * Flushes any pending work that was scheduled by a preceding 2058 * intel_display_power_put_async() call, completing the disabling of the 2059 * corresponding power domains. 2060 * 2061 * Note that the work handler function may still be running after this 2062 * function returns; to ensure that the work handler isn't running use 2063 * intel_display_power_flush_work_sync() instead. 2064 */ 2065 void intel_display_power_flush_work(struct drm_i915_private *i915) 2066 { 2067 struct i915_power_domains *power_domains = &i915->power_domains; 2068 intel_wakeref_t work_wakeref; 2069 2070 mutex_lock(&power_domains->lock); 2071 2072 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2073 if (!work_wakeref) 2074 goto out_verify; 2075 2076 release_async_put_domains(power_domains, 2077 async_put_domains_mask(power_domains)); 2078 cancel_delayed_work(&power_domains->async_put_work); 2079 2080 out_verify: 2081 verify_async_put_domains_state(power_domains); 2082 2083 mutex_unlock(&power_domains->lock); 2084 2085 if (work_wakeref) 2086 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); 2087 } 2088 2089 /** 2090 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work 2091 * @i915: i915 device instance 2092 * 2093 * Like intel_display_power_flush_work(), but also ensure that the work 2094 * handler function is not running any more when this function returns. 2095 */ 2096 static void 2097 intel_display_power_flush_work_sync(struct drm_i915_private *i915) 2098 { 2099 struct i915_power_domains *power_domains = &i915->power_domains; 2100 2101 intel_display_power_flush_work(i915); 2102 cancel_delayed_work_sync(&power_domains->async_put_work); 2103 2104 verify_async_put_domains_state(power_domains); 2105 2106 WARN_ON(power_domains->async_put_wakeref); 2107 } 2108 2109 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2110 /** 2111 * intel_display_power_put - release a power domain reference 2112 * @dev_priv: i915 device instance 2113 * @domain: power domain to reference 2114 * @wakeref: wakeref acquired for the reference that is being released 2115 * 2116 * This function drops the power domain reference obtained by 2117 * intel_display_power_get() and might power down the corresponding hardware 2118 * block right away if this is the last reference. 2119 */ 2120 void intel_display_power_put(struct drm_i915_private *dev_priv, 2121 enum intel_display_power_domain domain, 2122 intel_wakeref_t wakeref) 2123 { 2124 __intel_display_power_put(dev_priv, domain); 2125 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2126 } 2127 #endif 2128 2129 #define I830_PIPES_POWER_DOMAINS ( \ 2130 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2131 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2132 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2133 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2134 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2135 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2136 BIT_ULL(POWER_DOMAIN_INIT)) 2137 2138 #define VLV_DISPLAY_POWER_DOMAINS ( \ 2139 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2140 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2141 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2142 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2143 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2144 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2145 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2146 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2147 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2148 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2149 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2150 BIT_ULL(POWER_DOMAIN_VGA) | \ 2151 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2152 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2153 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2154 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2155 BIT_ULL(POWER_DOMAIN_INIT)) 2156 2157 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2158 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2159 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2160 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2161 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2162 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2163 BIT_ULL(POWER_DOMAIN_INIT)) 2164 2165 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ 2166 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2167 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2168 BIT_ULL(POWER_DOMAIN_INIT)) 2169 2170 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ 2171 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2172 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2173 BIT_ULL(POWER_DOMAIN_INIT)) 2174 2175 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ 2176 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2177 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2178 BIT_ULL(POWER_DOMAIN_INIT)) 2179 2180 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ 2181 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2182 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2183 BIT_ULL(POWER_DOMAIN_INIT)) 2184 2185 #define CHV_DISPLAY_POWER_DOMAINS ( \ 2186 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2187 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2188 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2189 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2190 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2191 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2192 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2193 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2194 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2195 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2196 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2197 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2198 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2199 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2200 BIT_ULL(POWER_DOMAIN_VGA) | \ 2201 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2202 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2203 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2204 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2205 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2206 BIT_ULL(POWER_DOMAIN_INIT)) 2207 2208 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2209 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2210 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2211 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2212 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2213 BIT_ULL(POWER_DOMAIN_INIT)) 2214 2215 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ 2216 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2217 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2218 BIT_ULL(POWER_DOMAIN_INIT)) 2219 2220 #define HSW_DISPLAY_POWER_DOMAINS ( \ 2221 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2222 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2223 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2224 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2225 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2226 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2227 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2228 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2229 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2230 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2231 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2232 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2233 BIT_ULL(POWER_DOMAIN_VGA) | \ 2234 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2235 BIT_ULL(POWER_DOMAIN_INIT)) 2236 2237 #define BDW_DISPLAY_POWER_DOMAINS ( \ 2238 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2239 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2240 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2241 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2242 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2243 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2244 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2245 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2246 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2247 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2248 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2249 BIT_ULL(POWER_DOMAIN_VGA) | \ 2250 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2251 BIT_ULL(POWER_DOMAIN_INIT)) 2252 2253 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2254 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2255 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2256 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2257 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2258 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2259 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2260 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2261 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2262 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2263 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2264 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2265 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2266 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2267 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2268 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2269 BIT_ULL(POWER_DOMAIN_VGA) | \ 2270 BIT_ULL(POWER_DOMAIN_INIT)) 2271 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ 2272 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2273 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ 2274 BIT_ULL(POWER_DOMAIN_INIT)) 2275 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2276 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2277 BIT_ULL(POWER_DOMAIN_INIT)) 2278 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2279 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2280 BIT_ULL(POWER_DOMAIN_INIT)) 2281 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ 2282 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2283 BIT_ULL(POWER_DOMAIN_INIT)) 2284 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2285 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2286 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2287 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2288 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2289 BIT_ULL(POWER_DOMAIN_INIT)) 2290 2291 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2292 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2293 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2294 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2295 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2296 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2297 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2298 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2299 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2300 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2301 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2302 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2303 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2304 BIT_ULL(POWER_DOMAIN_VGA) | \ 2305 BIT_ULL(POWER_DOMAIN_INIT)) 2306 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2307 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2308 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2309 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2310 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2311 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2312 BIT_ULL(POWER_DOMAIN_INIT)) 2313 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ 2314 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2315 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2316 BIT_ULL(POWER_DOMAIN_INIT)) 2317 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ 2318 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2319 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2320 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2321 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2322 BIT_ULL(POWER_DOMAIN_INIT)) 2323 2324 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2325 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2326 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2327 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2328 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2329 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2330 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2331 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2332 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2333 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2334 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2335 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2336 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2337 BIT_ULL(POWER_DOMAIN_VGA) | \ 2338 BIT_ULL(POWER_DOMAIN_INIT)) 2339 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ 2340 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2341 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2342 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2343 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2344 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2345 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ 2346 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2347 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2348 BIT_ULL(POWER_DOMAIN_INIT)) 2349 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ 2350 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2351 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2352 BIT_ULL(POWER_DOMAIN_INIT)) 2353 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ 2354 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2355 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2356 BIT_ULL(POWER_DOMAIN_INIT)) 2357 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2358 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2359 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2360 BIT_ULL(POWER_DOMAIN_INIT)) 2361 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2362 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2363 BIT_ULL(POWER_DOMAIN_INIT)) 2364 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2365 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2366 BIT_ULL(POWER_DOMAIN_INIT)) 2367 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2368 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2369 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2370 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2371 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2372 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2373 BIT_ULL(POWER_DOMAIN_INIT)) 2374 2375 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2376 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2377 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2378 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2379 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2380 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2381 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2382 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2383 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2384 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2385 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2386 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2387 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2388 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2389 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2390 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2391 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2392 BIT_ULL(POWER_DOMAIN_VGA) | \ 2393 BIT_ULL(POWER_DOMAIN_INIT)) 2394 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \ 2395 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2396 BIT_ULL(POWER_DOMAIN_INIT)) 2397 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \ 2398 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2399 BIT_ULL(POWER_DOMAIN_INIT)) 2400 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \ 2401 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2402 BIT_ULL(POWER_DOMAIN_INIT)) 2403 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \ 2404 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2405 BIT_ULL(POWER_DOMAIN_INIT)) 2406 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2407 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2408 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2409 BIT_ULL(POWER_DOMAIN_INIT)) 2410 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2411 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2412 BIT_ULL(POWER_DOMAIN_INIT)) 2413 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2414 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2415 BIT_ULL(POWER_DOMAIN_INIT)) 2416 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \ 2417 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2418 BIT_ULL(POWER_DOMAIN_INIT)) 2419 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \ 2420 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2421 BIT_ULL(POWER_DOMAIN_INIT)) 2422 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \ 2423 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \ 2424 BIT_ULL(POWER_DOMAIN_INIT)) 2425 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2426 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2427 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2428 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2429 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2430 BIT_ULL(POWER_DOMAIN_INIT)) 2431 2432 /* 2433 * ICL PW_0/PG_0 domains (HW/DMC control): 2434 * - PCI 2435 * - clocks except port PLL 2436 * - central power except FBC 2437 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers 2438 * ICL PW_1/PG_1 domains (HW/DMC control): 2439 * - DBUF function 2440 * - PIPE_A and its planes, except VGA 2441 * - transcoder EDP + PSR 2442 * - transcoder DSI 2443 * - DDI_A 2444 * - FBC 2445 */ 2446 #define ICL_PW_4_POWER_DOMAINS ( \ 2447 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2448 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2449 BIT_ULL(POWER_DOMAIN_INIT)) 2450 /* VDSC/joining */ 2451 #define ICL_PW_3_POWER_DOMAINS ( \ 2452 ICL_PW_4_POWER_DOMAINS | \ 2453 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2454 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2455 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2456 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2457 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2458 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2459 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2460 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2461 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2462 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2463 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2464 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2465 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2466 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2467 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2468 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ 2469 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2470 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2471 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2472 BIT_ULL(POWER_DOMAIN_VGA) | \ 2473 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2474 BIT_ULL(POWER_DOMAIN_INIT)) 2475 /* 2476 * - transcoder WD 2477 * - KVMR (HW control) 2478 */ 2479 #define ICL_PW_2_POWER_DOMAINS ( \ 2480 ICL_PW_3_POWER_DOMAINS | \ 2481 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2482 BIT_ULL(POWER_DOMAIN_INIT)) 2483 /* 2484 * - KVMR (HW control) 2485 */ 2486 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2487 ICL_PW_2_POWER_DOMAINS | \ 2488 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2489 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2490 BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \ 2491 BIT_ULL(POWER_DOMAIN_INIT)) 2492 2493 #define ICL_DDI_IO_A_POWER_DOMAINS ( \ 2494 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2495 #define ICL_DDI_IO_B_POWER_DOMAINS ( \ 2496 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2497 #define ICL_DDI_IO_C_POWER_DOMAINS ( \ 2498 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2499 #define ICL_DDI_IO_D_POWER_DOMAINS ( \ 2500 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2501 #define ICL_DDI_IO_E_POWER_DOMAINS ( \ 2502 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2503 #define ICL_DDI_IO_F_POWER_DOMAINS ( \ 2504 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2505 2506 #define ICL_AUX_A_IO_POWER_DOMAINS ( \ 2507 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2508 BIT_ULL(POWER_DOMAIN_AUX_A)) 2509 #define ICL_AUX_B_IO_POWER_DOMAINS ( \ 2510 BIT_ULL(POWER_DOMAIN_AUX_B)) 2511 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ 2512 BIT_ULL(POWER_DOMAIN_AUX_C)) 2513 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ 2514 BIT_ULL(POWER_DOMAIN_AUX_D)) 2515 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ 2516 BIT_ULL(POWER_DOMAIN_AUX_E)) 2517 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ 2518 BIT_ULL(POWER_DOMAIN_AUX_F)) 2519 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ 2520 BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) 2521 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ 2522 BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) 2523 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ 2524 BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) 2525 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ 2526 BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) 2527 2528 #define TGL_PW_5_POWER_DOMAINS ( \ 2529 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 2530 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 2531 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 2532 BIT_ULL(POWER_DOMAIN_INIT)) 2533 2534 #define TGL_PW_4_POWER_DOMAINS ( \ 2535 TGL_PW_5_POWER_DOMAINS | \ 2536 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2537 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2538 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2539 BIT_ULL(POWER_DOMAIN_INIT)) 2540 2541 #define TGL_PW_3_POWER_DOMAINS ( \ 2542 TGL_PW_4_POWER_DOMAINS | \ 2543 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2544 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2545 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2546 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2547 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2548 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2549 BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \ 2550 BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \ 2551 BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \ 2552 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2553 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2554 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2555 BIT_ULL(POWER_DOMAIN_AUX_G) | \ 2556 BIT_ULL(POWER_DOMAIN_AUX_H) | \ 2557 BIT_ULL(POWER_DOMAIN_AUX_I) | \ 2558 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2559 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2560 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2561 BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \ 2562 BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \ 2563 BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \ 2564 BIT_ULL(POWER_DOMAIN_VGA) | \ 2565 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2566 BIT_ULL(POWER_DOMAIN_INIT)) 2567 2568 #define TGL_PW_2_POWER_DOMAINS ( \ 2569 TGL_PW_3_POWER_DOMAINS | \ 2570 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2571 BIT_ULL(POWER_DOMAIN_INIT)) 2572 2573 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2574 TGL_PW_2_POWER_DOMAINS | \ 2575 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2576 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2577 BIT_ULL(POWER_DOMAIN_INIT)) 2578 2579 #define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \ 2580 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2581 #define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \ 2582 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2583 #define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \ 2584 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2585 #define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \ 2586 BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO)) 2587 #define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \ 2588 BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO)) 2589 #define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \ 2590 BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO)) 2591 2592 #define TGL_AUX_A_IO_POWER_DOMAINS ( \ 2593 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2594 BIT_ULL(POWER_DOMAIN_AUX_A)) 2595 #define TGL_AUX_B_IO_POWER_DOMAINS ( \ 2596 BIT_ULL(POWER_DOMAIN_AUX_B)) 2597 #define TGL_AUX_C_IO_POWER_DOMAINS ( \ 2598 BIT_ULL(POWER_DOMAIN_AUX_C)) 2599 #define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \ 2600 BIT_ULL(POWER_DOMAIN_AUX_D)) 2601 #define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \ 2602 BIT_ULL(POWER_DOMAIN_AUX_E)) 2603 #define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \ 2604 BIT_ULL(POWER_DOMAIN_AUX_F)) 2605 #define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \ 2606 BIT_ULL(POWER_DOMAIN_AUX_G)) 2607 #define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \ 2608 BIT_ULL(POWER_DOMAIN_AUX_H)) 2609 #define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \ 2610 BIT_ULL(POWER_DOMAIN_AUX_I)) 2611 #define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \ 2612 BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) 2613 #define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \ 2614 BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) 2615 #define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \ 2616 BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) 2617 #define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \ 2618 BIT_ULL(POWER_DOMAIN_AUX_G_TBT)) 2619 #define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \ 2620 BIT_ULL(POWER_DOMAIN_AUX_H_TBT)) 2621 #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \ 2622 BIT_ULL(POWER_DOMAIN_AUX_I_TBT)) 2623 2624 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 2625 .sync_hw = i9xx_power_well_sync_hw_noop, 2626 .enable = i9xx_always_on_power_well_noop, 2627 .disable = i9xx_always_on_power_well_noop, 2628 .is_enabled = i9xx_always_on_power_well_enabled, 2629 }; 2630 2631 static const struct i915_power_well_ops chv_pipe_power_well_ops = { 2632 .sync_hw = i9xx_power_well_sync_hw_noop, 2633 .enable = chv_pipe_power_well_enable, 2634 .disable = chv_pipe_power_well_disable, 2635 .is_enabled = chv_pipe_power_well_enabled, 2636 }; 2637 2638 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { 2639 .sync_hw = i9xx_power_well_sync_hw_noop, 2640 .enable = chv_dpio_cmn_power_well_enable, 2641 .disable = chv_dpio_cmn_power_well_disable, 2642 .is_enabled = vlv_power_well_enabled, 2643 }; 2644 2645 static const struct i915_power_well_desc i9xx_always_on_power_well[] = { 2646 { 2647 .name = "always-on", 2648 .always_on = true, 2649 .domains = POWER_DOMAIN_MASK, 2650 .ops = &i9xx_always_on_power_well_ops, 2651 .id = DISP_PW_ID_NONE, 2652 }, 2653 }; 2654 2655 static const struct i915_power_well_ops i830_pipes_power_well_ops = { 2656 .sync_hw = i830_pipes_power_well_sync_hw, 2657 .enable = i830_pipes_power_well_enable, 2658 .disable = i830_pipes_power_well_disable, 2659 .is_enabled = i830_pipes_power_well_enabled, 2660 }; 2661 2662 static const struct i915_power_well_desc i830_power_wells[] = { 2663 { 2664 .name = "always-on", 2665 .always_on = true, 2666 .domains = POWER_DOMAIN_MASK, 2667 .ops = &i9xx_always_on_power_well_ops, 2668 .id = DISP_PW_ID_NONE, 2669 }, 2670 { 2671 .name = "pipes", 2672 .domains = I830_PIPES_POWER_DOMAINS, 2673 .ops = &i830_pipes_power_well_ops, 2674 .id = DISP_PW_ID_NONE, 2675 }, 2676 }; 2677 2678 static const struct i915_power_well_ops hsw_power_well_ops = { 2679 .sync_hw = hsw_power_well_sync_hw, 2680 .enable = hsw_power_well_enable, 2681 .disable = hsw_power_well_disable, 2682 .is_enabled = hsw_power_well_enabled, 2683 }; 2684 2685 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { 2686 .sync_hw = i9xx_power_well_sync_hw_noop, 2687 .enable = gen9_dc_off_power_well_enable, 2688 .disable = gen9_dc_off_power_well_disable, 2689 .is_enabled = gen9_dc_off_power_well_enabled, 2690 }; 2691 2692 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { 2693 .sync_hw = i9xx_power_well_sync_hw_noop, 2694 .enable = bxt_dpio_cmn_power_well_enable, 2695 .disable = bxt_dpio_cmn_power_well_disable, 2696 .is_enabled = bxt_dpio_cmn_power_well_enabled, 2697 }; 2698 2699 static const struct i915_power_well_regs hsw_power_well_regs = { 2700 .bios = HSW_PWR_WELL_CTL1, 2701 .driver = HSW_PWR_WELL_CTL2, 2702 .kvmr = HSW_PWR_WELL_CTL3, 2703 .debug = HSW_PWR_WELL_CTL4, 2704 }; 2705 2706 static const struct i915_power_well_desc hsw_power_wells[] = { 2707 { 2708 .name = "always-on", 2709 .always_on = true, 2710 .domains = POWER_DOMAIN_MASK, 2711 .ops = &i9xx_always_on_power_well_ops, 2712 .id = DISP_PW_ID_NONE, 2713 }, 2714 { 2715 .name = "display", 2716 .domains = HSW_DISPLAY_POWER_DOMAINS, 2717 .ops = &hsw_power_well_ops, 2718 .id = HSW_DISP_PW_GLOBAL, 2719 { 2720 .hsw.regs = &hsw_power_well_regs, 2721 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 2722 .hsw.has_vga = true, 2723 }, 2724 }, 2725 }; 2726 2727 static const struct i915_power_well_desc bdw_power_wells[] = { 2728 { 2729 .name = "always-on", 2730 .always_on = true, 2731 .domains = POWER_DOMAIN_MASK, 2732 .ops = &i9xx_always_on_power_well_ops, 2733 .id = DISP_PW_ID_NONE, 2734 }, 2735 { 2736 .name = "display", 2737 .domains = BDW_DISPLAY_POWER_DOMAINS, 2738 .ops = &hsw_power_well_ops, 2739 .id = HSW_DISP_PW_GLOBAL, 2740 { 2741 .hsw.regs = &hsw_power_well_regs, 2742 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 2743 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 2744 .hsw.has_vga = true, 2745 }, 2746 }, 2747 }; 2748 2749 static const struct i915_power_well_ops vlv_display_power_well_ops = { 2750 .sync_hw = i9xx_power_well_sync_hw_noop, 2751 .enable = vlv_display_power_well_enable, 2752 .disable = vlv_display_power_well_disable, 2753 .is_enabled = vlv_power_well_enabled, 2754 }; 2755 2756 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { 2757 .sync_hw = i9xx_power_well_sync_hw_noop, 2758 .enable = vlv_dpio_cmn_power_well_enable, 2759 .disable = vlv_dpio_cmn_power_well_disable, 2760 .is_enabled = vlv_power_well_enabled, 2761 }; 2762 2763 static const struct i915_power_well_ops vlv_dpio_power_well_ops = { 2764 .sync_hw = i9xx_power_well_sync_hw_noop, 2765 .enable = vlv_power_well_enable, 2766 .disable = vlv_power_well_disable, 2767 .is_enabled = vlv_power_well_enabled, 2768 }; 2769 2770 static const struct i915_power_well_desc vlv_power_wells[] = { 2771 { 2772 .name = "always-on", 2773 .always_on = true, 2774 .domains = POWER_DOMAIN_MASK, 2775 .ops = &i9xx_always_on_power_well_ops, 2776 .id = DISP_PW_ID_NONE, 2777 }, 2778 { 2779 .name = "display", 2780 .domains = VLV_DISPLAY_POWER_DOMAINS, 2781 .ops = &vlv_display_power_well_ops, 2782 .id = VLV_DISP_PW_DISP2D, 2783 { 2784 .vlv.idx = PUNIT_PWGT_IDX_DISP2D, 2785 }, 2786 }, 2787 { 2788 .name = "dpio-tx-b-01", 2789 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 2790 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 2791 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 2792 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 2793 .ops = &vlv_dpio_power_well_ops, 2794 .id = DISP_PW_ID_NONE, 2795 { 2796 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, 2797 }, 2798 }, 2799 { 2800 .name = "dpio-tx-b-23", 2801 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 2802 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 2803 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 2804 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 2805 .ops = &vlv_dpio_power_well_ops, 2806 .id = DISP_PW_ID_NONE, 2807 { 2808 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, 2809 }, 2810 }, 2811 { 2812 .name = "dpio-tx-c-01", 2813 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 2814 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 2815 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 2816 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 2817 .ops = &vlv_dpio_power_well_ops, 2818 .id = DISP_PW_ID_NONE, 2819 { 2820 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, 2821 }, 2822 }, 2823 { 2824 .name = "dpio-tx-c-23", 2825 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 2826 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 2827 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 2828 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 2829 .ops = &vlv_dpio_power_well_ops, 2830 .id = DISP_PW_ID_NONE, 2831 { 2832 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, 2833 }, 2834 }, 2835 { 2836 .name = "dpio-common", 2837 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, 2838 .ops = &vlv_dpio_cmn_power_well_ops, 2839 .id = VLV_DISP_PW_DPIO_CMN_BC, 2840 { 2841 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 2842 }, 2843 }, 2844 }; 2845 2846 static const struct i915_power_well_desc chv_power_wells[] = { 2847 { 2848 .name = "always-on", 2849 .always_on = true, 2850 .domains = POWER_DOMAIN_MASK, 2851 .ops = &i9xx_always_on_power_well_ops, 2852 .id = DISP_PW_ID_NONE, 2853 }, 2854 { 2855 .name = "display", 2856 /* 2857 * Pipe A power well is the new disp2d well. Pipe B and C 2858 * power wells don't actually exist. Pipe A power well is 2859 * required for any pipe to work. 2860 */ 2861 .domains = CHV_DISPLAY_POWER_DOMAINS, 2862 .ops = &chv_pipe_power_well_ops, 2863 .id = DISP_PW_ID_NONE, 2864 }, 2865 { 2866 .name = "dpio-common-bc", 2867 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, 2868 .ops = &chv_dpio_cmn_power_well_ops, 2869 .id = VLV_DISP_PW_DPIO_CMN_BC, 2870 { 2871 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 2872 }, 2873 }, 2874 { 2875 .name = "dpio-common-d", 2876 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, 2877 .ops = &chv_dpio_cmn_power_well_ops, 2878 .id = CHV_DISP_PW_DPIO_CMN_D, 2879 { 2880 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, 2881 }, 2882 }, 2883 }; 2884 2885 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 2886 enum i915_power_well_id power_well_id) 2887 { 2888 struct i915_power_well *power_well; 2889 bool ret; 2890 2891 power_well = lookup_power_well(dev_priv, power_well_id); 2892 ret = power_well->desc->ops->is_enabled(dev_priv, power_well); 2893 2894 return ret; 2895 } 2896 2897 static const struct i915_power_well_desc skl_power_wells[] = { 2898 { 2899 .name = "always-on", 2900 .always_on = true, 2901 .domains = POWER_DOMAIN_MASK, 2902 .ops = &i9xx_always_on_power_well_ops, 2903 .id = DISP_PW_ID_NONE, 2904 }, 2905 { 2906 .name = "power well 1", 2907 /* Handled by the DMC firmware */ 2908 .always_on = true, 2909 .domains = 0, 2910 .ops = &hsw_power_well_ops, 2911 .id = SKL_DISP_PW_1, 2912 { 2913 .hsw.regs = &hsw_power_well_regs, 2914 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 2915 .hsw.has_fuses = true, 2916 }, 2917 }, 2918 { 2919 .name = "MISC IO power well", 2920 /* Handled by the DMC firmware */ 2921 .always_on = true, 2922 .domains = 0, 2923 .ops = &hsw_power_well_ops, 2924 .id = SKL_DISP_PW_MISC_IO, 2925 { 2926 .hsw.regs = &hsw_power_well_regs, 2927 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, 2928 }, 2929 }, 2930 { 2931 .name = "DC off", 2932 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, 2933 .ops = &gen9_dc_off_power_well_ops, 2934 .id = DISP_PW_ID_NONE, 2935 }, 2936 { 2937 .name = "power well 2", 2938 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 2939 .ops = &hsw_power_well_ops, 2940 .id = SKL_DISP_PW_2, 2941 { 2942 .hsw.regs = &hsw_power_well_regs, 2943 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 2944 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 2945 .hsw.has_vga = true, 2946 .hsw.has_fuses = true, 2947 }, 2948 }, 2949 { 2950 .name = "DDI A/E IO power well", 2951 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, 2952 .ops = &hsw_power_well_ops, 2953 .id = DISP_PW_ID_NONE, 2954 { 2955 .hsw.regs = &hsw_power_well_regs, 2956 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, 2957 }, 2958 }, 2959 { 2960 .name = "DDI B IO power well", 2961 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, 2962 .ops = &hsw_power_well_ops, 2963 .id = DISP_PW_ID_NONE, 2964 { 2965 .hsw.regs = &hsw_power_well_regs, 2966 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 2967 }, 2968 }, 2969 { 2970 .name = "DDI C IO power well", 2971 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, 2972 .ops = &hsw_power_well_ops, 2973 .id = DISP_PW_ID_NONE, 2974 { 2975 .hsw.regs = &hsw_power_well_regs, 2976 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 2977 }, 2978 }, 2979 { 2980 .name = "DDI D IO power well", 2981 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, 2982 .ops = &hsw_power_well_ops, 2983 .id = DISP_PW_ID_NONE, 2984 { 2985 .hsw.regs = &hsw_power_well_regs, 2986 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 2987 }, 2988 }, 2989 }; 2990 2991 static const struct i915_power_well_desc bxt_power_wells[] = { 2992 { 2993 .name = "always-on", 2994 .always_on = true, 2995 .domains = POWER_DOMAIN_MASK, 2996 .ops = &i9xx_always_on_power_well_ops, 2997 .id = DISP_PW_ID_NONE, 2998 }, 2999 { 3000 .name = "power well 1", 3001 /* Handled by the DMC firmware */ 3002 .always_on = true, 3003 .domains = 0, 3004 .ops = &hsw_power_well_ops, 3005 .id = SKL_DISP_PW_1, 3006 { 3007 .hsw.regs = &hsw_power_well_regs, 3008 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3009 .hsw.has_fuses = true, 3010 }, 3011 }, 3012 { 3013 .name = "DC off", 3014 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, 3015 .ops = &gen9_dc_off_power_well_ops, 3016 .id = DISP_PW_ID_NONE, 3017 }, 3018 { 3019 .name = "power well 2", 3020 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3021 .ops = &hsw_power_well_ops, 3022 .id = SKL_DISP_PW_2, 3023 { 3024 .hsw.regs = &hsw_power_well_regs, 3025 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3026 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3027 .hsw.has_vga = true, 3028 .hsw.has_fuses = true, 3029 }, 3030 }, 3031 { 3032 .name = "dpio-common-a", 3033 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, 3034 .ops = &bxt_dpio_cmn_power_well_ops, 3035 .id = BXT_DISP_PW_DPIO_CMN_A, 3036 { 3037 .bxt.phy = DPIO_PHY1, 3038 }, 3039 }, 3040 { 3041 .name = "dpio-common-bc", 3042 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, 3043 .ops = &bxt_dpio_cmn_power_well_ops, 3044 .id = VLV_DISP_PW_DPIO_CMN_BC, 3045 { 3046 .bxt.phy = DPIO_PHY0, 3047 }, 3048 }, 3049 }; 3050 3051 static const struct i915_power_well_desc glk_power_wells[] = { 3052 { 3053 .name = "always-on", 3054 .always_on = true, 3055 .domains = POWER_DOMAIN_MASK, 3056 .ops = &i9xx_always_on_power_well_ops, 3057 .id = DISP_PW_ID_NONE, 3058 }, 3059 { 3060 .name = "power well 1", 3061 /* Handled by the DMC firmware */ 3062 .always_on = true, 3063 .domains = 0, 3064 .ops = &hsw_power_well_ops, 3065 .id = SKL_DISP_PW_1, 3066 { 3067 .hsw.regs = &hsw_power_well_regs, 3068 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3069 .hsw.has_fuses = true, 3070 }, 3071 }, 3072 { 3073 .name = "DC off", 3074 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, 3075 .ops = &gen9_dc_off_power_well_ops, 3076 .id = DISP_PW_ID_NONE, 3077 }, 3078 { 3079 .name = "power well 2", 3080 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3081 .ops = &hsw_power_well_ops, 3082 .id = SKL_DISP_PW_2, 3083 { 3084 .hsw.regs = &hsw_power_well_regs, 3085 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3086 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3087 .hsw.has_vga = true, 3088 .hsw.has_fuses = true, 3089 }, 3090 }, 3091 { 3092 .name = "dpio-common-a", 3093 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, 3094 .ops = &bxt_dpio_cmn_power_well_ops, 3095 .id = BXT_DISP_PW_DPIO_CMN_A, 3096 { 3097 .bxt.phy = DPIO_PHY1, 3098 }, 3099 }, 3100 { 3101 .name = "dpio-common-b", 3102 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, 3103 .ops = &bxt_dpio_cmn_power_well_ops, 3104 .id = VLV_DISP_PW_DPIO_CMN_BC, 3105 { 3106 .bxt.phy = DPIO_PHY0, 3107 }, 3108 }, 3109 { 3110 .name = "dpio-common-c", 3111 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, 3112 .ops = &bxt_dpio_cmn_power_well_ops, 3113 .id = GLK_DISP_PW_DPIO_CMN_C, 3114 { 3115 .bxt.phy = DPIO_PHY2, 3116 }, 3117 }, 3118 { 3119 .name = "AUX A", 3120 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, 3121 .ops = &hsw_power_well_ops, 3122 .id = DISP_PW_ID_NONE, 3123 { 3124 .hsw.regs = &hsw_power_well_regs, 3125 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3126 }, 3127 }, 3128 { 3129 .name = "AUX B", 3130 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, 3131 .ops = &hsw_power_well_ops, 3132 .id = DISP_PW_ID_NONE, 3133 { 3134 .hsw.regs = &hsw_power_well_regs, 3135 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3136 }, 3137 }, 3138 { 3139 .name = "AUX C", 3140 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, 3141 .ops = &hsw_power_well_ops, 3142 .id = DISP_PW_ID_NONE, 3143 { 3144 .hsw.regs = &hsw_power_well_regs, 3145 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3146 }, 3147 }, 3148 { 3149 .name = "DDI A IO power well", 3150 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, 3151 .ops = &hsw_power_well_ops, 3152 .id = DISP_PW_ID_NONE, 3153 { 3154 .hsw.regs = &hsw_power_well_regs, 3155 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3156 }, 3157 }, 3158 { 3159 .name = "DDI B IO power well", 3160 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3161 .ops = &hsw_power_well_ops, 3162 .id = DISP_PW_ID_NONE, 3163 { 3164 .hsw.regs = &hsw_power_well_regs, 3165 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3166 }, 3167 }, 3168 { 3169 .name = "DDI C IO power well", 3170 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3171 .ops = &hsw_power_well_ops, 3172 .id = DISP_PW_ID_NONE, 3173 { 3174 .hsw.regs = &hsw_power_well_regs, 3175 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3176 }, 3177 }, 3178 }; 3179 3180 static const struct i915_power_well_desc cnl_power_wells[] = { 3181 { 3182 .name = "always-on", 3183 .always_on = true, 3184 .domains = POWER_DOMAIN_MASK, 3185 .ops = &i9xx_always_on_power_well_ops, 3186 .id = DISP_PW_ID_NONE, 3187 }, 3188 { 3189 .name = "power well 1", 3190 /* Handled by the DMC firmware */ 3191 .always_on = true, 3192 .domains = 0, 3193 .ops = &hsw_power_well_ops, 3194 .id = SKL_DISP_PW_1, 3195 { 3196 .hsw.regs = &hsw_power_well_regs, 3197 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3198 .hsw.has_fuses = true, 3199 }, 3200 }, 3201 { 3202 .name = "AUX A", 3203 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS, 3204 .ops = &hsw_power_well_ops, 3205 .id = DISP_PW_ID_NONE, 3206 { 3207 .hsw.regs = &hsw_power_well_regs, 3208 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3209 }, 3210 }, 3211 { 3212 .name = "AUX B", 3213 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS, 3214 .ops = &hsw_power_well_ops, 3215 .id = DISP_PW_ID_NONE, 3216 { 3217 .hsw.regs = &hsw_power_well_regs, 3218 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3219 }, 3220 }, 3221 { 3222 .name = "AUX C", 3223 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS, 3224 .ops = &hsw_power_well_ops, 3225 .id = DISP_PW_ID_NONE, 3226 { 3227 .hsw.regs = &hsw_power_well_regs, 3228 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3229 }, 3230 }, 3231 { 3232 .name = "AUX D", 3233 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS, 3234 .ops = &hsw_power_well_ops, 3235 .id = DISP_PW_ID_NONE, 3236 { 3237 .hsw.regs = &hsw_power_well_regs, 3238 .hsw.idx = CNL_PW_CTL_IDX_AUX_D, 3239 }, 3240 }, 3241 { 3242 .name = "DC off", 3243 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, 3244 .ops = &gen9_dc_off_power_well_ops, 3245 .id = DISP_PW_ID_NONE, 3246 }, 3247 { 3248 .name = "power well 2", 3249 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3250 .ops = &hsw_power_well_ops, 3251 .id = SKL_DISP_PW_2, 3252 { 3253 .hsw.regs = &hsw_power_well_regs, 3254 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3255 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3256 .hsw.has_vga = true, 3257 .hsw.has_fuses = true, 3258 }, 3259 }, 3260 { 3261 .name = "DDI A IO power well", 3262 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS, 3263 .ops = &hsw_power_well_ops, 3264 .id = DISP_PW_ID_NONE, 3265 { 3266 .hsw.regs = &hsw_power_well_regs, 3267 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3268 }, 3269 }, 3270 { 3271 .name = "DDI B IO power well", 3272 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS, 3273 .ops = &hsw_power_well_ops, 3274 .id = DISP_PW_ID_NONE, 3275 { 3276 .hsw.regs = &hsw_power_well_regs, 3277 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3278 }, 3279 }, 3280 { 3281 .name = "DDI C IO power well", 3282 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS, 3283 .ops = &hsw_power_well_ops, 3284 .id = DISP_PW_ID_NONE, 3285 { 3286 .hsw.regs = &hsw_power_well_regs, 3287 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3288 }, 3289 }, 3290 { 3291 .name = "DDI D IO power well", 3292 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS, 3293 .ops = &hsw_power_well_ops, 3294 .id = DISP_PW_ID_NONE, 3295 { 3296 .hsw.regs = &hsw_power_well_regs, 3297 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 3298 }, 3299 }, 3300 { 3301 .name = "DDI F IO power well", 3302 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS, 3303 .ops = &hsw_power_well_ops, 3304 .id = DISP_PW_ID_NONE, 3305 { 3306 .hsw.regs = &hsw_power_well_regs, 3307 .hsw.idx = CNL_PW_CTL_IDX_DDI_F, 3308 }, 3309 }, 3310 { 3311 .name = "AUX F", 3312 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS, 3313 .ops = &hsw_power_well_ops, 3314 .id = DISP_PW_ID_NONE, 3315 { 3316 .hsw.regs = &hsw_power_well_regs, 3317 .hsw.idx = CNL_PW_CTL_IDX_AUX_F, 3318 }, 3319 }, 3320 }; 3321 3322 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { 3323 .sync_hw = hsw_power_well_sync_hw, 3324 .enable = icl_combo_phy_aux_power_well_enable, 3325 .disable = icl_combo_phy_aux_power_well_disable, 3326 .is_enabled = hsw_power_well_enabled, 3327 }; 3328 3329 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = { 3330 .sync_hw = hsw_power_well_sync_hw, 3331 .enable = icl_tc_phy_aux_power_well_enable, 3332 .disable = icl_tc_phy_aux_power_well_disable, 3333 .is_enabled = hsw_power_well_enabled, 3334 }; 3335 3336 static const struct i915_power_well_regs icl_aux_power_well_regs = { 3337 .bios = ICL_PWR_WELL_CTL_AUX1, 3338 .driver = ICL_PWR_WELL_CTL_AUX2, 3339 .debug = ICL_PWR_WELL_CTL_AUX4, 3340 }; 3341 3342 static const struct i915_power_well_regs icl_ddi_power_well_regs = { 3343 .bios = ICL_PWR_WELL_CTL_DDI1, 3344 .driver = ICL_PWR_WELL_CTL_DDI2, 3345 .debug = ICL_PWR_WELL_CTL_DDI4, 3346 }; 3347 3348 static const struct i915_power_well_desc icl_power_wells[] = { 3349 { 3350 .name = "always-on", 3351 .always_on = true, 3352 .domains = POWER_DOMAIN_MASK, 3353 .ops = &i9xx_always_on_power_well_ops, 3354 .id = DISP_PW_ID_NONE, 3355 }, 3356 { 3357 .name = "power well 1", 3358 /* Handled by the DMC firmware */ 3359 .always_on = true, 3360 .domains = 0, 3361 .ops = &hsw_power_well_ops, 3362 .id = SKL_DISP_PW_1, 3363 { 3364 .hsw.regs = &hsw_power_well_regs, 3365 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3366 .hsw.has_fuses = true, 3367 }, 3368 }, 3369 { 3370 .name = "DC off", 3371 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, 3372 .ops = &gen9_dc_off_power_well_ops, 3373 .id = DISP_PW_ID_NONE, 3374 }, 3375 { 3376 .name = "power well 2", 3377 .domains = ICL_PW_2_POWER_DOMAINS, 3378 .ops = &hsw_power_well_ops, 3379 .id = SKL_DISP_PW_2, 3380 { 3381 .hsw.regs = &hsw_power_well_regs, 3382 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3383 .hsw.has_fuses = true, 3384 }, 3385 }, 3386 { 3387 .name = "power well 3", 3388 .domains = ICL_PW_3_POWER_DOMAINS, 3389 .ops = &hsw_power_well_ops, 3390 .id = DISP_PW_ID_NONE, 3391 { 3392 .hsw.regs = &hsw_power_well_regs, 3393 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3394 .hsw.irq_pipe_mask = BIT(PIPE_B), 3395 .hsw.has_vga = true, 3396 .hsw.has_fuses = true, 3397 }, 3398 }, 3399 { 3400 .name = "DDI A IO", 3401 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3402 .ops = &hsw_power_well_ops, 3403 .id = DISP_PW_ID_NONE, 3404 { 3405 .hsw.regs = &icl_ddi_power_well_regs, 3406 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3407 }, 3408 }, 3409 { 3410 .name = "DDI B IO", 3411 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3412 .ops = &hsw_power_well_ops, 3413 .id = DISP_PW_ID_NONE, 3414 { 3415 .hsw.regs = &icl_ddi_power_well_regs, 3416 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3417 }, 3418 }, 3419 { 3420 .name = "DDI C IO", 3421 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3422 .ops = &hsw_power_well_ops, 3423 .id = DISP_PW_ID_NONE, 3424 { 3425 .hsw.regs = &icl_ddi_power_well_regs, 3426 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3427 }, 3428 }, 3429 { 3430 .name = "DDI D IO", 3431 .domains = ICL_DDI_IO_D_POWER_DOMAINS, 3432 .ops = &hsw_power_well_ops, 3433 .id = DISP_PW_ID_NONE, 3434 { 3435 .hsw.regs = &icl_ddi_power_well_regs, 3436 .hsw.idx = ICL_PW_CTL_IDX_DDI_D, 3437 }, 3438 }, 3439 { 3440 .name = "DDI E IO", 3441 .domains = ICL_DDI_IO_E_POWER_DOMAINS, 3442 .ops = &hsw_power_well_ops, 3443 .id = DISP_PW_ID_NONE, 3444 { 3445 .hsw.regs = &icl_ddi_power_well_regs, 3446 .hsw.idx = ICL_PW_CTL_IDX_DDI_E, 3447 }, 3448 }, 3449 { 3450 .name = "DDI F IO", 3451 .domains = ICL_DDI_IO_F_POWER_DOMAINS, 3452 .ops = &hsw_power_well_ops, 3453 .id = DISP_PW_ID_NONE, 3454 { 3455 .hsw.regs = &icl_ddi_power_well_regs, 3456 .hsw.idx = ICL_PW_CTL_IDX_DDI_F, 3457 }, 3458 }, 3459 { 3460 .name = "AUX A", 3461 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 3462 .ops = &icl_combo_phy_aux_power_well_ops, 3463 .id = DISP_PW_ID_NONE, 3464 { 3465 .hsw.regs = &icl_aux_power_well_regs, 3466 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3467 }, 3468 }, 3469 { 3470 .name = "AUX B", 3471 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 3472 .ops = &icl_combo_phy_aux_power_well_ops, 3473 .id = DISP_PW_ID_NONE, 3474 { 3475 .hsw.regs = &icl_aux_power_well_regs, 3476 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3477 }, 3478 }, 3479 { 3480 .name = "AUX C TC1", 3481 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, 3482 .ops = &icl_tc_phy_aux_power_well_ops, 3483 .id = DISP_PW_ID_NONE, 3484 { 3485 .hsw.regs = &icl_aux_power_well_regs, 3486 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3487 .hsw.is_tc_tbt = false, 3488 }, 3489 }, 3490 { 3491 .name = "AUX D TC2", 3492 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, 3493 .ops = &icl_tc_phy_aux_power_well_ops, 3494 .id = DISP_PW_ID_NONE, 3495 { 3496 .hsw.regs = &icl_aux_power_well_regs, 3497 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 3498 .hsw.is_tc_tbt = false, 3499 }, 3500 }, 3501 { 3502 .name = "AUX E TC3", 3503 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, 3504 .ops = &icl_tc_phy_aux_power_well_ops, 3505 .id = DISP_PW_ID_NONE, 3506 { 3507 .hsw.regs = &icl_aux_power_well_regs, 3508 .hsw.idx = ICL_PW_CTL_IDX_AUX_E, 3509 .hsw.is_tc_tbt = false, 3510 }, 3511 }, 3512 { 3513 .name = "AUX F TC4", 3514 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, 3515 .ops = &icl_tc_phy_aux_power_well_ops, 3516 .id = DISP_PW_ID_NONE, 3517 { 3518 .hsw.regs = &icl_aux_power_well_regs, 3519 .hsw.idx = ICL_PW_CTL_IDX_AUX_F, 3520 .hsw.is_tc_tbt = false, 3521 }, 3522 }, 3523 { 3524 .name = "AUX C TBT1", 3525 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, 3526 .ops = &icl_tc_phy_aux_power_well_ops, 3527 .id = DISP_PW_ID_NONE, 3528 { 3529 .hsw.regs = &icl_aux_power_well_regs, 3530 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, 3531 .hsw.is_tc_tbt = true, 3532 }, 3533 }, 3534 { 3535 .name = "AUX D TBT2", 3536 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, 3537 .ops = &icl_tc_phy_aux_power_well_ops, 3538 .id = DISP_PW_ID_NONE, 3539 { 3540 .hsw.regs = &icl_aux_power_well_regs, 3541 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, 3542 .hsw.is_tc_tbt = true, 3543 }, 3544 }, 3545 { 3546 .name = "AUX E TBT3", 3547 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, 3548 .ops = &icl_tc_phy_aux_power_well_ops, 3549 .id = DISP_PW_ID_NONE, 3550 { 3551 .hsw.regs = &icl_aux_power_well_regs, 3552 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, 3553 .hsw.is_tc_tbt = true, 3554 }, 3555 }, 3556 { 3557 .name = "AUX F TBT4", 3558 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, 3559 .ops = &icl_tc_phy_aux_power_well_ops, 3560 .id = DISP_PW_ID_NONE, 3561 { 3562 .hsw.regs = &icl_aux_power_well_regs, 3563 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, 3564 .hsw.is_tc_tbt = true, 3565 }, 3566 }, 3567 { 3568 .name = "power well 4", 3569 .domains = ICL_PW_4_POWER_DOMAINS, 3570 .ops = &hsw_power_well_ops, 3571 .id = DISP_PW_ID_NONE, 3572 { 3573 .hsw.regs = &hsw_power_well_regs, 3574 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 3575 .hsw.has_fuses = true, 3576 .hsw.irq_pipe_mask = BIT(PIPE_C), 3577 }, 3578 }, 3579 }; 3580 3581 static const struct i915_power_well_desc tgl_power_wells[] = { 3582 { 3583 .name = "always-on", 3584 .always_on = true, 3585 .domains = POWER_DOMAIN_MASK, 3586 .ops = &i9xx_always_on_power_well_ops, 3587 .id = DISP_PW_ID_NONE, 3588 }, 3589 { 3590 .name = "power well 1", 3591 /* Handled by the DMC firmware */ 3592 .always_on = true, 3593 .domains = 0, 3594 .ops = &hsw_power_well_ops, 3595 .id = SKL_DISP_PW_1, 3596 { 3597 .hsw.regs = &hsw_power_well_regs, 3598 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3599 .hsw.has_fuses = true, 3600 }, 3601 }, 3602 { 3603 .name = "DC off", 3604 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, 3605 .ops = &gen9_dc_off_power_well_ops, 3606 .id = DISP_PW_ID_NONE, 3607 }, 3608 { 3609 .name = "power well 2", 3610 .domains = TGL_PW_2_POWER_DOMAINS, 3611 .ops = &hsw_power_well_ops, 3612 .id = SKL_DISP_PW_2, 3613 { 3614 .hsw.regs = &hsw_power_well_regs, 3615 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3616 .hsw.has_fuses = true, 3617 }, 3618 }, 3619 { 3620 .name = "power well 3", 3621 .domains = TGL_PW_3_POWER_DOMAINS, 3622 .ops = &hsw_power_well_ops, 3623 .id = DISP_PW_ID_NONE, 3624 { 3625 .hsw.regs = &hsw_power_well_regs, 3626 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3627 .hsw.irq_pipe_mask = BIT(PIPE_B), 3628 .hsw.has_vga = true, 3629 .hsw.has_fuses = true, 3630 }, 3631 }, 3632 { 3633 .name = "DDI A IO", 3634 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3635 .ops = &hsw_power_well_ops, 3636 .id = DISP_PW_ID_NONE, 3637 { 3638 .hsw.regs = &icl_ddi_power_well_regs, 3639 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3640 } 3641 }, 3642 { 3643 .name = "DDI B IO", 3644 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3645 .ops = &hsw_power_well_ops, 3646 .id = DISP_PW_ID_NONE, 3647 { 3648 .hsw.regs = &icl_ddi_power_well_regs, 3649 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3650 } 3651 }, 3652 { 3653 .name = "DDI C IO", 3654 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3655 .ops = &hsw_power_well_ops, 3656 .id = DISP_PW_ID_NONE, 3657 { 3658 .hsw.regs = &icl_ddi_power_well_regs, 3659 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3660 } 3661 }, 3662 { 3663 .name = "DDI D TC1 IO", 3664 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS, 3665 .ops = &hsw_power_well_ops, 3666 .id = DISP_PW_ID_NONE, 3667 { 3668 .hsw.regs = &icl_ddi_power_well_regs, 3669 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 3670 }, 3671 }, 3672 { 3673 .name = "DDI E TC2 IO", 3674 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS, 3675 .ops = &hsw_power_well_ops, 3676 .id = DISP_PW_ID_NONE, 3677 { 3678 .hsw.regs = &icl_ddi_power_well_regs, 3679 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 3680 }, 3681 }, 3682 { 3683 .name = "DDI F TC3 IO", 3684 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS, 3685 .ops = &hsw_power_well_ops, 3686 .id = DISP_PW_ID_NONE, 3687 { 3688 .hsw.regs = &icl_ddi_power_well_regs, 3689 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 3690 }, 3691 }, 3692 { 3693 .name = "DDI G TC4 IO", 3694 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS, 3695 .ops = &hsw_power_well_ops, 3696 .id = DISP_PW_ID_NONE, 3697 { 3698 .hsw.regs = &icl_ddi_power_well_regs, 3699 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 3700 }, 3701 }, 3702 { 3703 .name = "DDI H TC5 IO", 3704 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS, 3705 .ops = &hsw_power_well_ops, 3706 .id = DISP_PW_ID_NONE, 3707 { 3708 .hsw.regs = &icl_ddi_power_well_regs, 3709 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, 3710 }, 3711 }, 3712 { 3713 .name = "DDI I TC6 IO", 3714 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS, 3715 .ops = &hsw_power_well_ops, 3716 .id = DISP_PW_ID_NONE, 3717 { 3718 .hsw.regs = &icl_ddi_power_well_regs, 3719 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, 3720 }, 3721 }, 3722 { 3723 .name = "AUX A", 3724 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 3725 .ops = &icl_combo_phy_aux_power_well_ops, 3726 .id = DISP_PW_ID_NONE, 3727 { 3728 .hsw.regs = &icl_aux_power_well_regs, 3729 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3730 }, 3731 }, 3732 { 3733 .name = "AUX B", 3734 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 3735 .ops = &icl_combo_phy_aux_power_well_ops, 3736 .id = DISP_PW_ID_NONE, 3737 { 3738 .hsw.regs = &icl_aux_power_well_regs, 3739 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3740 }, 3741 }, 3742 { 3743 .name = "AUX C", 3744 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 3745 .ops = &icl_combo_phy_aux_power_well_ops, 3746 .id = DISP_PW_ID_NONE, 3747 { 3748 .hsw.regs = &icl_aux_power_well_regs, 3749 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3750 }, 3751 }, 3752 { 3753 .name = "AUX D TC1", 3754 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, 3755 .ops = &icl_tc_phy_aux_power_well_ops, 3756 .id = DISP_PW_ID_NONE, 3757 { 3758 .hsw.regs = &icl_aux_power_well_regs, 3759 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 3760 .hsw.is_tc_tbt = false, 3761 }, 3762 }, 3763 { 3764 .name = "AUX E TC2", 3765 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, 3766 .ops = &icl_tc_phy_aux_power_well_ops, 3767 .id = DISP_PW_ID_NONE, 3768 { 3769 .hsw.regs = &icl_aux_power_well_regs, 3770 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 3771 .hsw.is_tc_tbt = false, 3772 }, 3773 }, 3774 { 3775 .name = "AUX F TC3", 3776 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS, 3777 .ops = &icl_tc_phy_aux_power_well_ops, 3778 .id = DISP_PW_ID_NONE, 3779 { 3780 .hsw.regs = &icl_aux_power_well_regs, 3781 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 3782 .hsw.is_tc_tbt = false, 3783 }, 3784 }, 3785 { 3786 .name = "AUX G TC4", 3787 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS, 3788 .ops = &icl_tc_phy_aux_power_well_ops, 3789 .id = DISP_PW_ID_NONE, 3790 { 3791 .hsw.regs = &icl_aux_power_well_regs, 3792 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 3793 .hsw.is_tc_tbt = false, 3794 }, 3795 }, 3796 { 3797 .name = "AUX H TC5", 3798 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS, 3799 .ops = &icl_tc_phy_aux_power_well_ops, 3800 .id = DISP_PW_ID_NONE, 3801 { 3802 .hsw.regs = &icl_aux_power_well_regs, 3803 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, 3804 .hsw.is_tc_tbt = false, 3805 }, 3806 }, 3807 { 3808 .name = "AUX I TC6", 3809 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS, 3810 .ops = &icl_tc_phy_aux_power_well_ops, 3811 .id = DISP_PW_ID_NONE, 3812 { 3813 .hsw.regs = &icl_aux_power_well_regs, 3814 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, 3815 .hsw.is_tc_tbt = false, 3816 }, 3817 }, 3818 { 3819 .name = "AUX D TBT1", 3820 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS, 3821 .ops = &hsw_power_well_ops, 3822 .id = DISP_PW_ID_NONE, 3823 { 3824 .hsw.regs = &icl_aux_power_well_regs, 3825 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 3826 .hsw.is_tc_tbt = true, 3827 }, 3828 }, 3829 { 3830 .name = "AUX E TBT2", 3831 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS, 3832 .ops = &hsw_power_well_ops, 3833 .id = DISP_PW_ID_NONE, 3834 { 3835 .hsw.regs = &icl_aux_power_well_regs, 3836 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 3837 .hsw.is_tc_tbt = true, 3838 }, 3839 }, 3840 { 3841 .name = "AUX F TBT3", 3842 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS, 3843 .ops = &hsw_power_well_ops, 3844 .id = DISP_PW_ID_NONE, 3845 { 3846 .hsw.regs = &icl_aux_power_well_regs, 3847 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 3848 .hsw.is_tc_tbt = true, 3849 }, 3850 }, 3851 { 3852 .name = "AUX G TBT4", 3853 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS, 3854 .ops = &hsw_power_well_ops, 3855 .id = DISP_PW_ID_NONE, 3856 { 3857 .hsw.regs = &icl_aux_power_well_regs, 3858 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 3859 .hsw.is_tc_tbt = true, 3860 }, 3861 }, 3862 { 3863 .name = "AUX H TBT5", 3864 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS, 3865 .ops = &hsw_power_well_ops, 3866 .id = DISP_PW_ID_NONE, 3867 { 3868 .hsw.regs = &icl_aux_power_well_regs, 3869 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, 3870 .hsw.is_tc_tbt = true, 3871 }, 3872 }, 3873 { 3874 .name = "AUX I TBT6", 3875 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS, 3876 .ops = &hsw_power_well_ops, 3877 .id = DISP_PW_ID_NONE, 3878 { 3879 .hsw.regs = &icl_aux_power_well_regs, 3880 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, 3881 .hsw.is_tc_tbt = true, 3882 }, 3883 }, 3884 { 3885 .name = "power well 4", 3886 .domains = TGL_PW_4_POWER_DOMAINS, 3887 .ops = &hsw_power_well_ops, 3888 .id = DISP_PW_ID_NONE, 3889 { 3890 .hsw.regs = &hsw_power_well_regs, 3891 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 3892 .hsw.has_fuses = true, 3893 .hsw.irq_pipe_mask = BIT(PIPE_C), 3894 } 3895 }, 3896 { 3897 .name = "power well 5", 3898 .domains = TGL_PW_5_POWER_DOMAINS, 3899 .ops = &hsw_power_well_ops, 3900 .id = DISP_PW_ID_NONE, 3901 { 3902 .hsw.regs = &hsw_power_well_regs, 3903 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 3904 .hsw.has_fuses = true, 3905 .hsw.irq_pipe_mask = BIT(PIPE_D), 3906 }, 3907 }, 3908 }; 3909 3910 static int 3911 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 3912 int disable_power_well) 3913 { 3914 if (disable_power_well >= 0) 3915 return !!disable_power_well; 3916 3917 return 1; 3918 } 3919 3920 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, 3921 int enable_dc) 3922 { 3923 u32 mask; 3924 int requested_dc; 3925 int max_dc; 3926 3927 if (INTEL_GEN(dev_priv) >= 11) { 3928 max_dc = 2; 3929 /* 3930 * DC9 has a separate HW flow from the rest of the DC states, 3931 * not depending on the DMC firmware. It's needed by system 3932 * suspend/resume, so allow it unconditionally. 3933 */ 3934 mask = DC_STATE_EN_DC9; 3935 } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) { 3936 max_dc = 2; 3937 mask = 0; 3938 } else if (IS_GEN9_LP(dev_priv)) { 3939 max_dc = 1; 3940 mask = DC_STATE_EN_DC9; 3941 } else { 3942 max_dc = 0; 3943 mask = 0; 3944 } 3945 3946 if (!i915_modparams.disable_power_well) 3947 max_dc = 0; 3948 3949 if (enable_dc >= 0 && enable_dc <= max_dc) { 3950 requested_dc = enable_dc; 3951 } else if (enable_dc == -1) { 3952 requested_dc = max_dc; 3953 } else if (enable_dc > max_dc && enable_dc <= 2) { 3954 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n", 3955 enable_dc, max_dc); 3956 requested_dc = max_dc; 3957 } else { 3958 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc); 3959 requested_dc = max_dc; 3960 } 3961 3962 if (requested_dc > 1) 3963 mask |= DC_STATE_EN_UPTO_DC6; 3964 if (requested_dc > 0) 3965 mask |= DC_STATE_EN_UPTO_DC5; 3966 3967 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask); 3968 3969 return mask; 3970 } 3971 3972 static int 3973 __set_power_wells(struct i915_power_domains *power_domains, 3974 const struct i915_power_well_desc *power_well_descs, 3975 int power_well_count) 3976 { 3977 u64 power_well_ids = 0; 3978 int i; 3979 3980 power_domains->power_well_count = power_well_count; 3981 power_domains->power_wells = 3982 kcalloc(power_well_count, 3983 sizeof(*power_domains->power_wells), 3984 GFP_KERNEL); 3985 if (!power_domains->power_wells) 3986 return -ENOMEM; 3987 3988 for (i = 0; i < power_well_count; i++) { 3989 enum i915_power_well_id id = power_well_descs[i].id; 3990 3991 power_domains->power_wells[i].desc = &power_well_descs[i]; 3992 3993 if (id == DISP_PW_ID_NONE) 3994 continue; 3995 3996 WARN_ON(id >= sizeof(power_well_ids) * 8); 3997 WARN_ON(power_well_ids & BIT_ULL(id)); 3998 power_well_ids |= BIT_ULL(id); 3999 } 4000 4001 return 0; 4002 } 4003 4004 #define set_power_wells(power_domains, __power_well_descs) \ 4005 __set_power_wells(power_domains, __power_well_descs, \ 4006 ARRAY_SIZE(__power_well_descs)) 4007 4008 /** 4009 * intel_power_domains_init - initializes the power domain structures 4010 * @dev_priv: i915 device instance 4011 * 4012 * Initializes the power domain structures for @dev_priv depending upon the 4013 * supported platform. 4014 */ 4015 int intel_power_domains_init(struct drm_i915_private *dev_priv) 4016 { 4017 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4018 int err; 4019 4020 i915_modparams.disable_power_well = 4021 sanitize_disable_power_well_option(dev_priv, 4022 i915_modparams.disable_power_well); 4023 dev_priv->csr.allowed_dc_mask = 4024 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc); 4025 4026 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); 4027 4028 mutex_init(&power_domains->lock); 4029 4030 INIT_DELAYED_WORK(&power_domains->async_put_work, 4031 intel_display_power_put_async_work); 4032 4033 /* 4034 * The enabling order will be from lower to higher indexed wells, 4035 * the disabling order is reversed. 4036 */ 4037 if (IS_GEN(dev_priv, 12)) { 4038 err = set_power_wells(power_domains, tgl_power_wells); 4039 } else if (IS_GEN(dev_priv, 11)) { 4040 err = set_power_wells(power_domains, icl_power_wells); 4041 } else if (IS_CANNONLAKE(dev_priv)) { 4042 err = set_power_wells(power_domains, cnl_power_wells); 4043 4044 /* 4045 * DDI and Aux IO are getting enabled for all ports 4046 * regardless the presence or use. So, in order to avoid 4047 * timeouts, lets remove them from the list 4048 * for the SKUs without port F. 4049 */ 4050 if (!IS_CNL_WITH_PORT_F(dev_priv)) 4051 power_domains->power_well_count -= 2; 4052 } else if (IS_GEMINILAKE(dev_priv)) { 4053 err = set_power_wells(power_domains, glk_power_wells); 4054 } else if (IS_BROXTON(dev_priv)) { 4055 err = set_power_wells(power_domains, bxt_power_wells); 4056 } else if (IS_GEN9_BC(dev_priv)) { 4057 err = set_power_wells(power_domains, skl_power_wells); 4058 } else if (IS_CHERRYVIEW(dev_priv)) { 4059 err = set_power_wells(power_domains, chv_power_wells); 4060 } else if (IS_BROADWELL(dev_priv)) { 4061 err = set_power_wells(power_domains, bdw_power_wells); 4062 } else if (IS_HASWELL(dev_priv)) { 4063 err = set_power_wells(power_domains, hsw_power_wells); 4064 } else if (IS_VALLEYVIEW(dev_priv)) { 4065 err = set_power_wells(power_domains, vlv_power_wells); 4066 } else if (IS_I830(dev_priv)) { 4067 err = set_power_wells(power_domains, i830_power_wells); 4068 } else { 4069 err = set_power_wells(power_domains, i9xx_always_on_power_well); 4070 } 4071 4072 return err; 4073 } 4074 4075 /** 4076 * intel_power_domains_cleanup - clean up power domains resources 4077 * @dev_priv: i915 device instance 4078 * 4079 * Release any resources acquired by intel_power_domains_init() 4080 */ 4081 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) 4082 { 4083 kfree(dev_priv->power_domains.power_wells); 4084 } 4085 4086 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) 4087 { 4088 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4089 struct i915_power_well *power_well; 4090 4091 mutex_lock(&power_domains->lock); 4092 for_each_power_well(dev_priv, power_well) { 4093 power_well->desc->ops->sync_hw(dev_priv, power_well); 4094 power_well->hw_enabled = 4095 power_well->desc->ops->is_enabled(dev_priv, power_well); 4096 } 4097 mutex_unlock(&power_domains->lock); 4098 } 4099 4100 static inline 4101 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv, 4102 i915_reg_t reg, bool enable) 4103 { 4104 u32 val, status; 4105 4106 val = I915_READ(reg); 4107 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST); 4108 I915_WRITE(reg, val); 4109 POSTING_READ(reg); 4110 udelay(10); 4111 4112 status = I915_READ(reg) & DBUF_POWER_STATE; 4113 if ((enable && !status) || (!enable && status)) { 4114 DRM_ERROR("DBus power %s timeout!\n", 4115 enable ? "enable" : "disable"); 4116 return false; 4117 } 4118 return true; 4119 } 4120 4121 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) 4122 { 4123 intel_dbuf_slice_set(dev_priv, DBUF_CTL, true); 4124 } 4125 4126 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) 4127 { 4128 intel_dbuf_slice_set(dev_priv, DBUF_CTL, false); 4129 } 4130 4131 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv) 4132 { 4133 if (INTEL_GEN(dev_priv) < 11) 4134 return 1; 4135 return 2; 4136 } 4137 4138 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, 4139 u8 req_slices) 4140 { 4141 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 4142 bool ret; 4143 4144 if (req_slices > intel_dbuf_max_slices(dev_priv)) { 4145 DRM_ERROR("Invalid number of dbuf slices requested\n"); 4146 return; 4147 } 4148 4149 if (req_slices == hw_enabled_slices || req_slices == 0) 4150 return; 4151 4152 if (req_slices > hw_enabled_slices) 4153 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); 4154 else 4155 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false); 4156 4157 if (ret) 4158 dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices; 4159 } 4160 4161 static void icl_dbuf_enable(struct drm_i915_private *dev_priv) 4162 { 4163 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST); 4164 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST); 4165 POSTING_READ(DBUF_CTL_S2); 4166 4167 udelay(10); 4168 4169 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || 4170 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) 4171 DRM_ERROR("DBuf power enable timeout\n"); 4172 else 4173 /* 4174 * FIXME: for now pretend that we only have 1 slice, see 4175 * intel_enabled_dbuf_slices_num(). 4176 */ 4177 dev_priv->wm.skl_hw.ddb.enabled_slices = 1; 4178 } 4179 4180 static void icl_dbuf_disable(struct drm_i915_private *dev_priv) 4181 { 4182 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST); 4183 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST); 4184 POSTING_READ(DBUF_CTL_S2); 4185 4186 udelay(10); 4187 4188 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || 4189 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) 4190 DRM_ERROR("DBuf power disable timeout!\n"); 4191 else 4192 /* 4193 * FIXME: for now pretend that the first slice is always 4194 * enabled, see intel_enabled_dbuf_slices_num(). 4195 */ 4196 dev_priv->wm.skl_hw.ddb.enabled_slices = 1; 4197 } 4198 4199 static void icl_mbus_init(struct drm_i915_private *dev_priv) 4200 { 4201 u32 val; 4202 4203 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 4204 MBUS_ABOX_BT_CREDIT_POOL2(16) | 4205 MBUS_ABOX_B_CREDIT(1) | 4206 MBUS_ABOX_BW_CREDIT(1); 4207 4208 I915_WRITE(MBUS_ABOX_CTL, val); 4209 } 4210 4211 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 4212 { 4213 u32 val = I915_READ(LCPLL_CTL); 4214 4215 /* 4216 * The LCPLL register should be turned on by the BIOS. For now 4217 * let's just check its state and print errors in case 4218 * something is wrong. Don't even try to turn it on. 4219 */ 4220 4221 if (val & LCPLL_CD_SOURCE_FCLK) 4222 DRM_ERROR("CDCLK source is not LCPLL\n"); 4223 4224 if (val & LCPLL_PLL_DISABLE) 4225 DRM_ERROR("LCPLL is disabled\n"); 4226 4227 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 4228 DRM_ERROR("LCPLL not using non-SSC reference\n"); 4229 } 4230 4231 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 4232 { 4233 struct drm_device *dev = &dev_priv->drm; 4234 struct intel_crtc *crtc; 4235 4236 for_each_intel_crtc(dev, crtc) 4237 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 4238 pipe_name(crtc->pipe)); 4239 4240 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2), 4241 "Display power well on\n"); 4242 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, 4243 "SPLL enabled\n"); 4244 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 4245 "WRPLL1 enabled\n"); 4246 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 4247 "WRPLL2 enabled\n"); 4248 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, 4249 "Panel power on\n"); 4250 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 4251 "CPU PWM1 enabled\n"); 4252 if (IS_HASWELL(dev_priv)) 4253 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 4254 "CPU PWM2 enabled\n"); 4255 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 4256 "PCH PWM1 enabled\n"); 4257 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 4258 "Utility pin enabled\n"); 4259 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, 4260 "PCH GTC enabled\n"); 4261 4262 /* 4263 * In theory we can still leave IRQs enabled, as long as only the HPD 4264 * interrupts remain enabled. We used to check for that, but since it's 4265 * gen-specific and since we only disable LCPLL after we fully disable 4266 * the interrupts, the check below should be enough. 4267 */ 4268 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 4269 } 4270 4271 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 4272 { 4273 if (IS_HASWELL(dev_priv)) 4274 return I915_READ(D_COMP_HSW); 4275 else 4276 return I915_READ(D_COMP_BDW); 4277 } 4278 4279 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 4280 { 4281 if (IS_HASWELL(dev_priv)) { 4282 if (sandybridge_pcode_write(dev_priv, 4283 GEN6_PCODE_WRITE_D_COMP, val)) 4284 DRM_DEBUG_KMS("Failed to write to D_COMP\n"); 4285 } else { 4286 I915_WRITE(D_COMP_BDW, val); 4287 POSTING_READ(D_COMP_BDW); 4288 } 4289 } 4290 4291 /* 4292 * This function implements pieces of two sequences from BSpec: 4293 * - Sequence for display software to disable LCPLL 4294 * - Sequence for display software to allow package C8+ 4295 * The steps implemented here are just the steps that actually touch the LCPLL 4296 * register. Callers should take care of disabling all the display engine 4297 * functions, doing the mode unset, fixing interrupts, etc. 4298 */ 4299 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 4300 bool switch_to_fclk, bool allow_power_down) 4301 { 4302 u32 val; 4303 4304 assert_can_disable_lcpll(dev_priv); 4305 4306 val = I915_READ(LCPLL_CTL); 4307 4308 if (switch_to_fclk) { 4309 val |= LCPLL_CD_SOURCE_FCLK; 4310 I915_WRITE(LCPLL_CTL, val); 4311 4312 if (wait_for_us(I915_READ(LCPLL_CTL) & 4313 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 4314 DRM_ERROR("Switching to FCLK failed\n"); 4315 4316 val = I915_READ(LCPLL_CTL); 4317 } 4318 4319 val |= LCPLL_PLL_DISABLE; 4320 I915_WRITE(LCPLL_CTL, val); 4321 POSTING_READ(LCPLL_CTL); 4322 4323 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 4324 DRM_ERROR("LCPLL still locked\n"); 4325 4326 val = hsw_read_dcomp(dev_priv); 4327 val |= D_COMP_COMP_DISABLE; 4328 hsw_write_dcomp(dev_priv, val); 4329 ndelay(100); 4330 4331 if (wait_for((hsw_read_dcomp(dev_priv) & 4332 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 4333 DRM_ERROR("D_COMP RCOMP still in progress\n"); 4334 4335 if (allow_power_down) { 4336 val = I915_READ(LCPLL_CTL); 4337 val |= LCPLL_POWER_DOWN_ALLOW; 4338 I915_WRITE(LCPLL_CTL, val); 4339 POSTING_READ(LCPLL_CTL); 4340 } 4341 } 4342 4343 /* 4344 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 4345 * source. 4346 */ 4347 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 4348 { 4349 u32 val; 4350 4351 val = I915_READ(LCPLL_CTL); 4352 4353 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 4354 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 4355 return; 4356 4357 /* 4358 * Make sure we're not on PC8 state before disabling PC8, otherwise 4359 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 4360 */ 4361 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 4362 4363 if (val & LCPLL_POWER_DOWN_ALLOW) { 4364 val &= ~LCPLL_POWER_DOWN_ALLOW; 4365 I915_WRITE(LCPLL_CTL, val); 4366 POSTING_READ(LCPLL_CTL); 4367 } 4368 4369 val = hsw_read_dcomp(dev_priv); 4370 val |= D_COMP_COMP_FORCE; 4371 val &= ~D_COMP_COMP_DISABLE; 4372 hsw_write_dcomp(dev_priv, val); 4373 4374 val = I915_READ(LCPLL_CTL); 4375 val &= ~LCPLL_PLL_DISABLE; 4376 I915_WRITE(LCPLL_CTL, val); 4377 4378 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 4379 DRM_ERROR("LCPLL not locked yet\n"); 4380 4381 if (val & LCPLL_CD_SOURCE_FCLK) { 4382 val = I915_READ(LCPLL_CTL); 4383 val &= ~LCPLL_CD_SOURCE_FCLK; 4384 I915_WRITE(LCPLL_CTL, val); 4385 4386 if (wait_for_us((I915_READ(LCPLL_CTL) & 4387 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 4388 DRM_ERROR("Switching back to LCPLL failed\n"); 4389 } 4390 4391 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 4392 4393 intel_update_cdclk(dev_priv); 4394 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); 4395 } 4396 4397 /* 4398 * Package states C8 and deeper are really deep PC states that can only be 4399 * reached when all the devices on the system allow it, so even if the graphics 4400 * device allows PC8+, it doesn't mean the system will actually get to these 4401 * states. Our driver only allows PC8+ when going into runtime PM. 4402 * 4403 * The requirements for PC8+ are that all the outputs are disabled, the power 4404 * well is disabled and most interrupts are disabled, and these are also 4405 * requirements for runtime PM. When these conditions are met, we manually do 4406 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 4407 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard 4408 * hang the machine. 4409 * 4410 * When we really reach PC8 or deeper states (not just when we allow it) we lose 4411 * the state of some registers, so when we come back from PC8+ we need to 4412 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 4413 * need to take care of the registers kept by RC6. Notice that this happens even 4414 * if we don't put the device in PCI D3 state (which is what currently happens 4415 * because of the runtime PM support). 4416 * 4417 * For more, read "Display Sequences for Package C8" on the hardware 4418 * documentation. 4419 */ 4420 static void hsw_enable_pc8(struct drm_i915_private *dev_priv) 4421 { 4422 u32 val; 4423 4424 DRM_DEBUG_KMS("Enabling package C8+\n"); 4425 4426 if (HAS_PCH_LPT_LP(dev_priv)) { 4427 val = I915_READ(SOUTH_DSPCLK_GATE_D); 4428 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 4429 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 4430 } 4431 4432 lpt_disable_clkout_dp(dev_priv); 4433 hsw_disable_lcpll(dev_priv, true, true); 4434 } 4435 4436 static void hsw_disable_pc8(struct drm_i915_private *dev_priv) 4437 { 4438 u32 val; 4439 4440 DRM_DEBUG_KMS("Disabling package C8+\n"); 4441 4442 hsw_restore_lcpll(dev_priv); 4443 intel_init_pch_refclk(dev_priv); 4444 4445 if (HAS_PCH_LPT_LP(dev_priv)) { 4446 val = I915_READ(SOUTH_DSPCLK_GATE_D); 4447 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 4448 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 4449 } 4450 } 4451 4452 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, 4453 bool enable) 4454 { 4455 i915_reg_t reg; 4456 u32 reset_bits, val; 4457 4458 if (IS_IVYBRIDGE(dev_priv)) { 4459 reg = GEN7_MSG_CTL; 4460 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; 4461 } else { 4462 reg = HSW_NDE_RSTWRN_OPT; 4463 reset_bits = RESET_PCH_HANDSHAKE_ENABLE; 4464 } 4465 4466 val = I915_READ(reg); 4467 4468 if (enable) 4469 val |= reset_bits; 4470 else 4471 val &= ~reset_bits; 4472 4473 I915_WRITE(reg, val); 4474 } 4475 4476 static void skl_display_core_init(struct drm_i915_private *dev_priv, 4477 bool resume) 4478 { 4479 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4480 struct i915_power_well *well; 4481 4482 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 4483 4484 /* enable PCH reset handshake */ 4485 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 4486 4487 /* enable PG1 and Misc I/O */ 4488 mutex_lock(&power_domains->lock); 4489 4490 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4491 intel_power_well_enable(dev_priv, well); 4492 4493 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); 4494 intel_power_well_enable(dev_priv, well); 4495 4496 mutex_unlock(&power_domains->lock); 4497 4498 intel_cdclk_init(dev_priv); 4499 4500 gen9_dbuf_enable(dev_priv); 4501 4502 if (resume && dev_priv->csr.dmc_payload) 4503 intel_csr_load_program(dev_priv); 4504 } 4505 4506 static void skl_display_core_uninit(struct drm_i915_private *dev_priv) 4507 { 4508 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4509 struct i915_power_well *well; 4510 4511 gen9_disable_dc_states(dev_priv); 4512 4513 gen9_dbuf_disable(dev_priv); 4514 4515 intel_cdclk_uninit(dev_priv); 4516 4517 /* The spec doesn't call for removing the reset handshake flag */ 4518 /* disable PG1 and Misc I/O */ 4519 4520 mutex_lock(&power_domains->lock); 4521 4522 /* 4523 * BSpec says to keep the MISC IO power well enabled here, only 4524 * remove our request for power well 1. 4525 * Note that even though the driver's request is removed power well 1 4526 * may stay enabled after this due to DMC's own request on it. 4527 */ 4528 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4529 intel_power_well_disable(dev_priv, well); 4530 4531 mutex_unlock(&power_domains->lock); 4532 4533 usleep_range(10, 30); /* 10 us delay per Bspec */ 4534 } 4535 4536 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume) 4537 { 4538 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4539 struct i915_power_well *well; 4540 4541 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 4542 4543 /* 4544 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 4545 * or else the reset will hang because there is no PCH to respond. 4546 * Move the handshake programming to initialization sequence. 4547 * Previously was left up to BIOS. 4548 */ 4549 intel_pch_reset_handshake(dev_priv, false); 4550 4551 /* Enable PG1 */ 4552 mutex_lock(&power_domains->lock); 4553 4554 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4555 intel_power_well_enable(dev_priv, well); 4556 4557 mutex_unlock(&power_domains->lock); 4558 4559 intel_cdclk_init(dev_priv); 4560 4561 gen9_dbuf_enable(dev_priv); 4562 4563 if (resume && dev_priv->csr.dmc_payload) 4564 intel_csr_load_program(dev_priv); 4565 } 4566 4567 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) 4568 { 4569 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4570 struct i915_power_well *well; 4571 4572 gen9_disable_dc_states(dev_priv); 4573 4574 gen9_dbuf_disable(dev_priv); 4575 4576 intel_cdclk_uninit(dev_priv); 4577 4578 /* The spec doesn't call for removing the reset handshake flag */ 4579 4580 /* 4581 * Disable PW1 (PG1). 4582 * Note that even though the driver's request is removed power well 1 4583 * may stay enabled after this due to DMC's own request on it. 4584 */ 4585 mutex_lock(&power_domains->lock); 4586 4587 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4588 intel_power_well_disable(dev_priv, well); 4589 4590 mutex_unlock(&power_domains->lock); 4591 4592 usleep_range(10, 30); /* 10 us delay per Bspec */ 4593 } 4594 4595 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) 4596 { 4597 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4598 struct i915_power_well *well; 4599 4600 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 4601 4602 /* 1. Enable PCH Reset Handshake */ 4603 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 4604 4605 /* 2-3. */ 4606 intel_combo_phy_init(dev_priv); 4607 4608 /* 4609 * 4. Enable Power Well 1 (PG1). 4610 * The AUX IO power wells will be enabled on demand. 4611 */ 4612 mutex_lock(&power_domains->lock); 4613 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4614 intel_power_well_enable(dev_priv, well); 4615 mutex_unlock(&power_domains->lock); 4616 4617 /* 5. Enable CD clock */ 4618 intel_cdclk_init(dev_priv); 4619 4620 /* 6. Enable DBUF */ 4621 gen9_dbuf_enable(dev_priv); 4622 4623 if (resume && dev_priv->csr.dmc_payload) 4624 intel_csr_load_program(dev_priv); 4625 } 4626 4627 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) 4628 { 4629 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4630 struct i915_power_well *well; 4631 4632 gen9_disable_dc_states(dev_priv); 4633 4634 /* 1. Disable all display engine functions -> aready done */ 4635 4636 /* 2. Disable DBUF */ 4637 gen9_dbuf_disable(dev_priv); 4638 4639 /* 3. Disable CD clock */ 4640 intel_cdclk_uninit(dev_priv); 4641 4642 /* 4643 * 4. Disable Power Well 1 (PG1). 4644 * The AUX IO power wells are toggled on demand, so they are already 4645 * disabled at this point. 4646 */ 4647 mutex_lock(&power_domains->lock); 4648 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4649 intel_power_well_disable(dev_priv, well); 4650 mutex_unlock(&power_domains->lock); 4651 4652 usleep_range(10, 30); /* 10 us delay per Bspec */ 4653 4654 /* 5. */ 4655 intel_combo_phy_uninit(dev_priv); 4656 } 4657 4658 static void icl_display_core_init(struct drm_i915_private *dev_priv, 4659 bool resume) 4660 { 4661 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4662 struct i915_power_well *well; 4663 4664 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 4665 4666 /* 1. Enable PCH reset handshake. */ 4667 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 4668 4669 /* 2. Initialize all combo phys */ 4670 intel_combo_phy_init(dev_priv); 4671 4672 /* 4673 * 3. Enable Power Well 1 (PG1). 4674 * The AUX IO power wells will be enabled on demand. 4675 */ 4676 mutex_lock(&power_domains->lock); 4677 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4678 intel_power_well_enable(dev_priv, well); 4679 mutex_unlock(&power_domains->lock); 4680 4681 /* 4. Enable CDCLK. */ 4682 intel_cdclk_init(dev_priv); 4683 4684 /* 5. Enable DBUF. */ 4685 icl_dbuf_enable(dev_priv); 4686 4687 /* 6. Setup MBUS. */ 4688 icl_mbus_init(dev_priv); 4689 4690 if (resume && dev_priv->csr.dmc_payload) 4691 intel_csr_load_program(dev_priv); 4692 } 4693 4694 static void icl_display_core_uninit(struct drm_i915_private *dev_priv) 4695 { 4696 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4697 struct i915_power_well *well; 4698 4699 gen9_disable_dc_states(dev_priv); 4700 4701 /* 1. Disable all display engine functions -> aready done */ 4702 4703 /* 2. Disable DBUF */ 4704 icl_dbuf_disable(dev_priv); 4705 4706 /* 3. Disable CD clock */ 4707 intel_cdclk_uninit(dev_priv); 4708 4709 /* 4710 * 4. Disable Power Well 1 (PG1). 4711 * The AUX IO power wells are toggled on demand, so they are already 4712 * disabled at this point. 4713 */ 4714 mutex_lock(&power_domains->lock); 4715 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 4716 intel_power_well_disable(dev_priv, well); 4717 mutex_unlock(&power_domains->lock); 4718 4719 /* 5. */ 4720 intel_combo_phy_uninit(dev_priv); 4721 } 4722 4723 static void chv_phy_control_init(struct drm_i915_private *dev_priv) 4724 { 4725 struct i915_power_well *cmn_bc = 4726 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 4727 struct i915_power_well *cmn_d = 4728 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 4729 4730 /* 4731 * DISPLAY_PHY_CONTROL can get corrupted if read. As a 4732 * workaround never ever read DISPLAY_PHY_CONTROL, and 4733 * instead maintain a shadow copy ourselves. Use the actual 4734 * power well state and lane status to reconstruct the 4735 * expected initial value. 4736 */ 4737 dev_priv->chv_phy_control = 4738 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | 4739 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | 4740 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | 4741 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | 4742 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); 4743 4744 /* 4745 * If all lanes are disabled we leave the override disabled 4746 * with all power down bits cleared to match the state we 4747 * would use after disabling the port. Otherwise enable the 4748 * override and set the lane powerdown bits accding to the 4749 * current lane status. 4750 */ 4751 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { 4752 u32 status = I915_READ(DPLL(PIPE_A)); 4753 unsigned int mask; 4754 4755 mask = status & DPLL_PORTB_READY_MASK; 4756 if (mask == 0xf) 4757 mask = 0x0; 4758 else 4759 dev_priv->chv_phy_control |= 4760 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); 4761 4762 dev_priv->chv_phy_control |= 4763 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); 4764 4765 mask = (status & DPLL_PORTC_READY_MASK) >> 4; 4766 if (mask == 0xf) 4767 mask = 0x0; 4768 else 4769 dev_priv->chv_phy_control |= 4770 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); 4771 4772 dev_priv->chv_phy_control |= 4773 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); 4774 4775 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); 4776 4777 dev_priv->chv_phy_assert[DPIO_PHY0] = false; 4778 } else { 4779 dev_priv->chv_phy_assert[DPIO_PHY0] = true; 4780 } 4781 4782 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { 4783 u32 status = I915_READ(DPIO_PHY_STATUS); 4784 unsigned int mask; 4785 4786 mask = status & DPLL_PORTD_READY_MASK; 4787 4788 if (mask == 0xf) 4789 mask = 0x0; 4790 else 4791 dev_priv->chv_phy_control |= 4792 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); 4793 4794 dev_priv->chv_phy_control |= 4795 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); 4796 4797 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); 4798 4799 dev_priv->chv_phy_assert[DPIO_PHY1] = false; 4800 } else { 4801 dev_priv->chv_phy_assert[DPIO_PHY1] = true; 4802 } 4803 4804 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); 4805 4806 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n", 4807 dev_priv->chv_phy_control); 4808 } 4809 4810 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) 4811 { 4812 struct i915_power_well *cmn = 4813 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 4814 struct i915_power_well *disp2d = 4815 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); 4816 4817 /* If the display might be already active skip this */ 4818 if (cmn->desc->ops->is_enabled(dev_priv, cmn) && 4819 disp2d->desc->ops->is_enabled(dev_priv, disp2d) && 4820 I915_READ(DPIO_CTL) & DPIO_CMNRST) 4821 return; 4822 4823 DRM_DEBUG_KMS("toggling display PHY side reset\n"); 4824 4825 /* cmnlane needs DPLL registers */ 4826 disp2d->desc->ops->enable(dev_priv, disp2d); 4827 4828 /* 4829 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: 4830 * Need to assert and de-assert PHY SB reset by gating the 4831 * common lane power, then un-gating it. 4832 * Simply ungating isn't enough to reset the PHY enough to get 4833 * ports and lanes running. 4834 */ 4835 cmn->desc->ops->disable(dev_priv, cmn); 4836 } 4837 4838 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) 4839 { 4840 bool ret; 4841 4842 vlv_punit_get(dev_priv); 4843 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; 4844 vlv_punit_put(dev_priv); 4845 4846 return ret; 4847 } 4848 4849 static void assert_ved_power_gated(struct drm_i915_private *dev_priv) 4850 { 4851 WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), 4852 "VED not power gated\n"); 4853 } 4854 4855 static void assert_isp_power_gated(struct drm_i915_private *dev_priv) 4856 { 4857 static const struct pci_device_id isp_ids[] = { 4858 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, 4859 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, 4860 {} 4861 }; 4862 4863 WARN(!pci_dev_present(isp_ids) && 4864 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), 4865 "ISP not power gated\n"); 4866 } 4867 4868 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); 4869 4870 /** 4871 * intel_power_domains_init_hw - initialize hardware power domain state 4872 * @i915: i915 device instance 4873 * @resume: Called from resume code paths or not 4874 * 4875 * This function initializes the hardware power domain state and enables all 4876 * power wells belonging to the INIT power domain. Power wells in other 4877 * domains (and not in the INIT domain) are referenced or disabled by 4878 * intel_modeset_readout_hw_state(). After that the reference count of each 4879 * power well must match its HW enabled state, see 4880 * intel_power_domains_verify_state(). 4881 * 4882 * It will return with power domains disabled (to be enabled later by 4883 * intel_power_domains_enable()) and must be paired with 4884 * intel_power_domains_driver_remove(). 4885 */ 4886 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) 4887 { 4888 struct i915_power_domains *power_domains = &i915->power_domains; 4889 4890 power_domains->initializing = true; 4891 4892 if (INTEL_GEN(i915) >= 11) { 4893 icl_display_core_init(i915, resume); 4894 } else if (IS_CANNONLAKE(i915)) { 4895 cnl_display_core_init(i915, resume); 4896 } else if (IS_GEN9_BC(i915)) { 4897 skl_display_core_init(i915, resume); 4898 } else if (IS_GEN9_LP(i915)) { 4899 bxt_display_core_init(i915, resume); 4900 } else if (IS_CHERRYVIEW(i915)) { 4901 mutex_lock(&power_domains->lock); 4902 chv_phy_control_init(i915); 4903 mutex_unlock(&power_domains->lock); 4904 assert_isp_power_gated(i915); 4905 } else if (IS_VALLEYVIEW(i915)) { 4906 mutex_lock(&power_domains->lock); 4907 vlv_cmnlane_wa(i915); 4908 mutex_unlock(&power_domains->lock); 4909 assert_ved_power_gated(i915); 4910 assert_isp_power_gated(i915); 4911 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) { 4912 hsw_assert_cdclk(i915); 4913 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 4914 } else if (IS_IVYBRIDGE(i915)) { 4915 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 4916 } 4917 4918 /* 4919 * Keep all power wells enabled for any dependent HW access during 4920 * initialization and to make sure we keep BIOS enabled display HW 4921 * resources powered until display HW readout is complete. We drop 4922 * this reference in intel_power_domains_enable(). 4923 */ 4924 power_domains->wakeref = 4925 intel_display_power_get(i915, POWER_DOMAIN_INIT); 4926 4927 /* Disable power support if the user asked so. */ 4928 if (!i915_modparams.disable_power_well) 4929 intel_display_power_get(i915, POWER_DOMAIN_INIT); 4930 intel_power_domains_sync_hw(i915); 4931 4932 power_domains->initializing = false; 4933 } 4934 4935 /** 4936 * intel_power_domains_driver_remove - deinitialize hw power domain state 4937 * @i915: i915 device instance 4938 * 4939 * De-initializes the display power domain HW state. It also ensures that the 4940 * device stays powered up so that the driver can be reloaded. 4941 * 4942 * It must be called with power domains already disabled (after a call to 4943 * intel_power_domains_disable()) and must be paired with 4944 * intel_power_domains_init_hw(). 4945 */ 4946 void intel_power_domains_driver_remove(struct drm_i915_private *i915) 4947 { 4948 intel_wakeref_t wakeref __maybe_unused = 4949 fetch_and_zero(&i915->power_domains.wakeref); 4950 4951 /* Remove the refcount we took to keep power well support disabled. */ 4952 if (!i915_modparams.disable_power_well) 4953 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); 4954 4955 intel_display_power_flush_work_sync(i915); 4956 4957 intel_power_domains_verify_state(i915); 4958 4959 /* Keep the power well enabled, but cancel its rpm wakeref. */ 4960 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 4961 } 4962 4963 /** 4964 * intel_power_domains_enable - enable toggling of display power wells 4965 * @i915: i915 device instance 4966 * 4967 * Enable the ondemand enabling/disabling of the display power wells. Note that 4968 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled 4969 * only at specific points of the display modeset sequence, thus they are not 4970 * affected by the intel_power_domains_enable()/disable() calls. The purpose 4971 * of these function is to keep the rest of power wells enabled until the end 4972 * of display HW readout (which will acquire the power references reflecting 4973 * the current HW state). 4974 */ 4975 void intel_power_domains_enable(struct drm_i915_private *i915) 4976 { 4977 intel_wakeref_t wakeref __maybe_unused = 4978 fetch_and_zero(&i915->power_domains.wakeref); 4979 4980 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 4981 intel_power_domains_verify_state(i915); 4982 } 4983 4984 /** 4985 * intel_power_domains_disable - disable toggling of display power wells 4986 * @i915: i915 device instance 4987 * 4988 * Disable the ondemand enabling/disabling of the display power wells. See 4989 * intel_power_domains_enable() for which power wells this call controls. 4990 */ 4991 void intel_power_domains_disable(struct drm_i915_private *i915) 4992 { 4993 struct i915_power_domains *power_domains = &i915->power_domains; 4994 4995 WARN_ON(power_domains->wakeref); 4996 power_domains->wakeref = 4997 intel_display_power_get(i915, POWER_DOMAIN_INIT); 4998 4999 intel_power_domains_verify_state(i915); 5000 } 5001 5002 /** 5003 * intel_power_domains_suspend - suspend power domain state 5004 * @i915: i915 device instance 5005 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) 5006 * 5007 * This function prepares the hardware power domain state before entering 5008 * system suspend. 5009 * 5010 * It must be called with power domains already disabled (after a call to 5011 * intel_power_domains_disable()) and paired with intel_power_domains_resume(). 5012 */ 5013 void intel_power_domains_suspend(struct drm_i915_private *i915, 5014 enum i915_drm_suspend_mode suspend_mode) 5015 { 5016 struct i915_power_domains *power_domains = &i915->power_domains; 5017 intel_wakeref_t wakeref __maybe_unused = 5018 fetch_and_zero(&power_domains->wakeref); 5019 5020 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 5021 5022 /* 5023 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 5024 * support don't manually deinit the power domains. This also means the 5025 * CSR/DMC firmware will stay active, it will power down any HW 5026 * resources as required and also enable deeper system power states 5027 * that would be blocked if the firmware was inactive. 5028 */ 5029 if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) && 5030 suspend_mode == I915_DRM_SUSPEND_IDLE && 5031 i915->csr.dmc_payload) { 5032 intel_display_power_flush_work(i915); 5033 intel_power_domains_verify_state(i915); 5034 return; 5035 } 5036 5037 /* 5038 * Even if power well support was disabled we still want to disable 5039 * power wells if power domains must be deinitialized for suspend. 5040 */ 5041 if (!i915_modparams.disable_power_well) 5042 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); 5043 5044 intel_display_power_flush_work(i915); 5045 intel_power_domains_verify_state(i915); 5046 5047 if (INTEL_GEN(i915) >= 11) 5048 icl_display_core_uninit(i915); 5049 else if (IS_CANNONLAKE(i915)) 5050 cnl_display_core_uninit(i915); 5051 else if (IS_GEN9_BC(i915)) 5052 skl_display_core_uninit(i915); 5053 else if (IS_GEN9_LP(i915)) 5054 bxt_display_core_uninit(i915); 5055 5056 power_domains->display_core_suspended = true; 5057 } 5058 5059 /** 5060 * intel_power_domains_resume - resume power domain state 5061 * @i915: i915 device instance 5062 * 5063 * This function resume the hardware power domain state during system resume. 5064 * 5065 * It will return with power domain support disabled (to be enabled later by 5066 * intel_power_domains_enable()) and must be paired with 5067 * intel_power_domains_suspend(). 5068 */ 5069 void intel_power_domains_resume(struct drm_i915_private *i915) 5070 { 5071 struct i915_power_domains *power_domains = &i915->power_domains; 5072 5073 if (power_domains->display_core_suspended) { 5074 intel_power_domains_init_hw(i915, true); 5075 power_domains->display_core_suspended = false; 5076 } else { 5077 WARN_ON(power_domains->wakeref); 5078 power_domains->wakeref = 5079 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5080 } 5081 5082 intel_power_domains_verify_state(i915); 5083 } 5084 5085 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 5086 5087 static void intel_power_domains_dump_info(struct drm_i915_private *i915) 5088 { 5089 struct i915_power_domains *power_domains = &i915->power_domains; 5090 struct i915_power_well *power_well; 5091 5092 for_each_power_well(i915, power_well) { 5093 enum intel_display_power_domain domain; 5094 5095 DRM_DEBUG_DRIVER("%-25s %d\n", 5096 power_well->desc->name, power_well->count); 5097 5098 for_each_power_domain(domain, power_well->desc->domains) 5099 DRM_DEBUG_DRIVER(" %-23s %d\n", 5100 intel_display_power_domain_str(domain), 5101 power_domains->domain_use_count[domain]); 5102 } 5103 } 5104 5105 /** 5106 * intel_power_domains_verify_state - verify the HW/SW state for all power wells 5107 * @i915: i915 device instance 5108 * 5109 * Verify if the reference count of each power well matches its HW enabled 5110 * state and the total refcount of the domains it belongs to. This must be 5111 * called after modeset HW state sanitization, which is responsible for 5112 * acquiring reference counts for any power wells in use and disabling the 5113 * ones left on by BIOS but not required by any active output. 5114 */ 5115 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 5116 { 5117 struct i915_power_domains *power_domains = &i915->power_domains; 5118 struct i915_power_well *power_well; 5119 bool dump_domain_info; 5120 5121 mutex_lock(&power_domains->lock); 5122 5123 verify_async_put_domains_state(power_domains); 5124 5125 dump_domain_info = false; 5126 for_each_power_well(i915, power_well) { 5127 enum intel_display_power_domain domain; 5128 int domains_count; 5129 bool enabled; 5130 5131 enabled = power_well->desc->ops->is_enabled(i915, power_well); 5132 if ((power_well->count || power_well->desc->always_on) != 5133 enabled) 5134 DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)", 5135 power_well->desc->name, 5136 power_well->count, enabled); 5137 5138 domains_count = 0; 5139 for_each_power_domain(domain, power_well->desc->domains) 5140 domains_count += power_domains->domain_use_count[domain]; 5141 5142 if (power_well->count != domains_count) { 5143 DRM_ERROR("power well %s refcount/domain refcount mismatch " 5144 "(refcount %d/domains refcount %d)\n", 5145 power_well->desc->name, power_well->count, 5146 domains_count); 5147 dump_domain_info = true; 5148 } 5149 } 5150 5151 if (dump_domain_info) { 5152 static bool dumped; 5153 5154 if (!dumped) { 5155 intel_power_domains_dump_info(i915); 5156 dumped = true; 5157 } 5158 } 5159 5160 mutex_unlock(&power_domains->lock); 5161 } 5162 5163 #else 5164 5165 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 5166 { 5167 } 5168 5169 #endif 5170 5171 void intel_display_power_suspend_late(struct drm_i915_private *i915) 5172 { 5173 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) 5174 bxt_enable_dc9(i915); 5175 else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 5176 hsw_enable_pc8(i915); 5177 } 5178 5179 void intel_display_power_resume_early(struct drm_i915_private *i915) 5180 { 5181 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) { 5182 gen9_sanitize_dc_state(i915); 5183 bxt_disable_dc9(i915); 5184 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 5185 hsw_disable_pc8(i915); 5186 } 5187 } 5188 5189 void intel_display_power_suspend(struct drm_i915_private *i915) 5190 { 5191 if (INTEL_GEN(i915) >= 11) { 5192 icl_display_core_uninit(i915); 5193 bxt_enable_dc9(i915); 5194 } else if (IS_GEN9_LP(i915)) { 5195 bxt_display_core_uninit(i915); 5196 bxt_enable_dc9(i915); 5197 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 5198 hsw_enable_pc8(i915); 5199 } 5200 } 5201 5202 void intel_display_power_resume(struct drm_i915_private *i915) 5203 { 5204 if (INTEL_GEN(i915) >= 11) { 5205 bxt_disable_dc9(i915); 5206 icl_display_core_init(i915, true); 5207 if (i915->csr.dmc_payload) { 5208 if (i915->csr.allowed_dc_mask & 5209 DC_STATE_EN_UPTO_DC6) 5210 skl_enable_dc6(i915); 5211 else if (i915->csr.allowed_dc_mask & 5212 DC_STATE_EN_UPTO_DC5) 5213 gen9_enable_dc5(i915); 5214 } 5215 } else if (IS_GEN9_LP(i915)) { 5216 bxt_disable_dc9(i915); 5217 bxt_display_core_init(i915, true); 5218 if (i915->csr.dmc_payload && 5219 (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 5220 gen9_enable_dc5(i915); 5221 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 5222 hsw_disable_pc8(i915); 5223 } 5224 } 5225