// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_backlight_regs.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dmc.h"
#include "intel_dp_aux_regs.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_pcode.h"
#include "intel_pps.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "skl_watermark.h"
#include "vlv_sideband.h"
#include "vlv_sideband_reg.h"

struct i915_power_well_regs {
	i915_reg_t bios;
	i915_reg_t driver;
	i915_reg_t kvmr;
	i915_reg_t debug;
};

struct i915_power_well_ops {
	const struct i915_power_well_regs *regs;
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *i915,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *i915,
			   struct i915_power_well *power_well);
};
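/*
 * Illustrative sketch (not a new definition): each platform binds these
 * callbacks through a const ops table; e.g. the HSW table defined later
 * in this file looks like:
 *
 *	const struct i915_power_well_ops hsw_power_well_ops = {
 *		.regs = &hsw_power_well_regs,
 *		.sync_hw = hsw_power_well_sync_hw,
 *		.enable = hsw_power_well_enable,
 *		.disable = hsw_power_well_disable,
 *		.is_enabled = hsw_power_well_enabled,
 *	};
 */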
static const struct i915_power_well_instance *
i915_power_well_instance(const struct i915_power_well *power_well)
{
	return &power_well->desc->instances->list[power_well->instance_idx];
}

struct i915_power_well *
lookup_power_well(struct drm_i915_private *i915,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well)
		if (i915_power_well_instance(power_well)->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	drm_WARN(&i915->drm, 1,
		 "Power well %d not defined for this platform\n",
		 power_well_id);
	return &i915->display.power.domains.power_wells[0];
}

void intel_power_well_enable(struct drm_i915_private *i915,
			     struct i915_power_well *power_well)
{
	drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well));
	power_well->desc->ops->enable(i915, power_well);
	power_well->hw_enabled = true;
}

void intel_power_well_disable(struct drm_i915_private *i915,
			      struct i915_power_well *power_well)
{
	drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well));
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(i915, power_well);
}

void intel_power_well_sync_hw(struct drm_i915_private *i915,
			      struct i915_power_well *power_well)
{
	power_well->desc->ops->sync_hw(i915, power_well);
	power_well->hw_enabled =
		power_well->desc->ops->is_enabled(i915, power_well);
}

void intel_power_well_get(struct drm_i915_private *i915,
			  struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(i915, power_well);
}

void intel_power_well_put(struct drm_i915_private *i915,
			  struct i915_power_well *power_well)
{
	drm_WARN(&i915->drm, !power_well->count,
		 "Use count on power well %s is already zero",
		 i915_power_well_instance(power_well)->name);

	if (!--power_well->count)
		intel_power_well_disable(i915, power_well);
}

bool intel_power_well_is_enabled(struct drm_i915_private *i915,
				 struct i915_power_well *power_well)
{
	return power_well->desc->ops->is_enabled(i915, power_well);
}

bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
{
	return power_well->hw_enabled;
}

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, power_well_id);

	return intel_power_well_is_enabled(dev_priv, power_well);
}

bool intel_power_well_is_always_on(struct i915_power_well *power_well)
{
	return power_well->desc->always_on;
}

const char *intel_power_well_name(struct i915_power_well *power_well)
{
	return i915_power_well_instance(power_well)->name;
}

struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well)
{
	return &power_well->domains;
}

int intel_power_well_refcount(struct i915_power_well *power_well)
{
	return power_well->count;
}
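/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * enable/disable requests are refcounted, so the hardware is only touched
 * on the 0->1 and 1->0 transitions:
 *
 *	intel_power_well_get(i915, power_well);    // 0->1: ops->enable()
 *	... use the hardware behind the well ...
 *	intel_power_well_put(i915, power_well);    // 1->0: ops->disable()
 */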
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
{
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
					     ICL_AUX_PW_TO_CH(pw_idx);
}

static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
		       enum aux_ch aux_ch)
{
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);
		if (!dig_port)
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	return dig_port;
}

static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
				  const struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);

	return intel_port_to_phy(i915, dig_port->base.port);
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	/*
	 * For some power wells we're not supposed to watch the status bit for
	 * an ack, but rather just wait a fixed amount of time and then
	 * proceed. This is only used on DG2.
	 */
	if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) {
		usleep_range(600, 1200);
		return;
	}

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
			    intel_power_well_name(power_well));

		drm_WARN_ON(&dev_priv->drm, !timeout_expected);
	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;

	return ret;
}
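/*
 * Note: the bits in the value returned above map to the four request
 * registers: bit 0 = BIOS, bit 1 = driver, bit 2 = KVMR, bit 3 = DEBUG,
 * matching the decoding in hsw_wait_for_power_well_disable() below.
 */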
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    intel_power_well_name(power_well),
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	if (power_well->desc->has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);

		/* Wa_16013190616:adlp */
		if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
			intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);

		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	if (power_well->desc->has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(dev_priv, pg);
	}

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->irq_pipe_mask,
				   power_well->desc->has_vga);
}
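/*
 * Example of the mapping used above (assuming the usual macro
 * definitions): on SKL, SKL_PW_CTL_IDX_TO_PG() turns the PW1 control
 * index into SKL_PG1, so enabling PW1 first waits for the PG0 fuse
 * distribution status and then for PG1's own status.
 */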
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->irq_pipe_mask);

	intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&i915->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_EDP &&
		    encoder->port == port)
			return true;
	}

	return false;
}

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (DISPLAY_VER(dev_priv) < 12)
		intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
			     0, ICL_LANE_ENABLE_AUX);

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_port_is_edp(dev_priv, (enum port)phy))
		intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx),
			     0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS);
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0);

	intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
		return;

	if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		return;

	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void icl_tc_cold_exit(struct drm_i915_private *i915)
{
	int ret, tries = 0;

	while (1) {
		ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
					      250, 1);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		msleep(1);
	}

	/* Spec states that TC cold exit can take up to 1ms to complete */
	if (!ret)
		msleep(1);

	/* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
		    "succeeded");
}
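/*
 * Illustrative note: icl_tc_cold_exit() retries the PCODE request up to
 * three times on -EAGAIN with a 1 ms pause between attempts; it is used
 * by icl_tc_phy_aux_power_well_enable() below when a legacy TypeC port
 * needs to exit TC-cold before its AUX well can power up.
 */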
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	bool is_tbt = power_well->desc->is_tc_tbt;
	bool timeout_expected;

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch),
		     DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);

	intel_de_rmw(dev_priv, regs->driver,
		     0,
		     HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));

	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down, or if we
	 * need to enable AUX on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
	if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		icl_tc_cold_exit(dev_priv);

	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);

	if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);

		if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port)) &
			     DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting for TC uC health\n");
	}
}

static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
			  struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_enable(dev_priv,
							   power_well);
	else
		return hsw_power_well_enable(dev_priv, power_well);
}

static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return hsw_power_well_disable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_disable(dev_priv,
							    power_well);
	else
		return hsw_power_well_disable(dev_priv, power_well);
}
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(dev_priv, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(dev_priv, regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(dev_priv, DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that the state is exactly what we want.
	 */
	do {
		v = intel_de_read(dev_priv, DC_STATE_EN);

		if (v != state) {
			intel_de_write(dev_priv, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(&dev_priv->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the time we need one retry, avoid spam */
	if (rewrites > 1)
		drm_dbg_kms(&dev_priv->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (DISPLAY_VER(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
					  | DC_STATE_EN_DC9;
	else if (DISPLAY_VER(dev_priv) == 11)
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

void gen9_sanitize_dc_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	u32 val;

	if (!HAS_DISPLAY(i915))
		return;

	val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915);

	drm_dbg_kms(&i915->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    power_domains->dc_state, val);
	power_domains->dc_state = val;
}
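/*
 * Worked example of the mask above: on a display version 12 platform the
 * mask is DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 |
 * DC_STATE_EN_DC9, while on SKL (display version 9, non-BXT) it is just
 * DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6.
 */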
/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails are saved/restored as
 * needed.
 *
 * Based on the above, entering a deeper DC power state is asynchronous wrt.
 * enabling it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	u32 val;
	u32 mask;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     state & ~power_domains->allowed_dc_mask))
		state &= power_domains->allowed_dc_mask;

	val = intel_de_read(dev_priv, DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != power_domains->dc_state)
		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			power_domains->dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	power_domains->dc_state = val & mask;
}

static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
	intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Delay of 200us DC3CO Exit time B.Spec 49196
	 */
	usleep_range(200, 210);
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (DISPLAY_VER(dev_priv) == 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(&dev_priv->drm,
		      intel_display_power_well_is_enabled(dev_priv, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_dmc_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     0, SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, UTIL_PIN_CTL) &
		       (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) ==
		      (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
		      "Utility pin enabled in PWM mode\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_dmc_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     0, SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
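/*
 * Illustrative call sequence (simplified): the DC_off power well hooks
 * pick the target state and hand it to gen9_set_dc_state(), e.g.
 *
 *	gen9_set_dc_state(i915, DC_STATE_EN_UPTO_DC6);  // allow DC5/DC6
 *	...
 *	gen9_set_dc_state(i915, DC_STATE_DISABLE);      // block DC states
 */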
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_pps_reset_all(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(dev_priv, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(dev_priv, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(dev_priv, regs->driver, drv_req | mask);
		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (intel_power_well_refcount(power_well) > 0)
		bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (intel_power_well_refcount(power_well) > 0)
		bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (intel_power_well_refcount(power_well) > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 i915_power_well_instance(power_well)->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices;

	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
}
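/*
 * Note on hsw_power_well_sync_hw() above: if the BIOS left its request
 * bit set, the driver first mirrors the request in its own register and
 * only then clears the BIOS request, so the well never momentarily loses
 * all requesters during the handover.
 */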
void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct intel_cdclk_config cdclk_config = {};

	if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	if (!intel_dmc_has_payload(dev_priv))
		return;

	switch (power_domains->target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE &&
	       intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (intel_power_well_refcount(power_well) > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}
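/*
 * Illustrative note: which enable helper runs when the DC_off well is
 * released is chosen by power_domains->target_dc_state above, e.g. a
 * target of DC_STATE_EN_UPTO_DC6 makes gen9_dc_off_power_well_disable()
 * call skl_enable_dc6().
 */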
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
		     ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}
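/*
 * Illustrative note on the COND polling pattern above (also used by
 * chv_set_pipe_power_well() later in this file): the punit is asked for
 * the new state via PUNIT_REG_PWRGT_CTRL and the matching status field in
 * PUNIT_REG_PWRGT_STATUS is then polled for up to 100 ms.
 */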
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be explicitly initialized anyway.
	 */
	if (dev_priv->display.power.domains.initializing)
		return;

	intel_hpd_init(dev_priv);
	intel_hpd_poll_disable(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_pps_reset_all(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_enable(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *       be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST);
}
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->display.power.chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;
	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		drm_err(&dev_priv->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, dev_priv->display.power.chv_phy_control);
}

#undef BITS_SET
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d did not power up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
	       DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->display.power.chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->display.power.chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->display.power.chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->display.power.chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->display.power.chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->display.power.chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	drm_WARN(&dev_priv->drm, actual != expected,
		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
		 !!(actual & DPIO_ALLDL_POWERDOWN),
		 !!(actual & DPIO_ANYDL_POWERDOWN),
		 !!(expected & DPIO_ALLDL_POWERDOWN),
		 !!(expected & DPIO_ANYDL_POWERDOWN),
		 reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->display.power.chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		    phy, ch, dev_priv->display.power.chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));

	mutex_lock(&power_domains->lock);

	dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->display.power.chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		    phy, ch, mask, dev_priv->display.power.chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
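/*
 * Illustrative usage sketch (hypothetical values): an encoder can force a
 * whole channel on while programming it and restore the previous setting
 * afterwards, since chv_phy_powergate_ch() returns the old override state:
 *
 *	bool was = chv_phy_powergate_ch(i915, DPIO_PHY0, DPIO_CH0, true);
 *	... program the lanes ...
 *	chv_phy_powergate_ch(i915, DPIO_PHY0, DPIO_CH0, was);
 */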
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
		    state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->display.power.chv_phy_control);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static void
tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
{
	u8 tries = 0;
	int ret;

	while (1) {
		u32 low_val;
		u32 high_val = 0;

		if (block)
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
		else
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;

		/*
		 * Spec states that we should timeout the request after 200us
		 * but the function below will timeout after 500us
		 */
		ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
		if (ret == 0) {
			if (block &&
			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
				ret = -EIO;
			else
				break;
		}

		if (++tries == 3)
			break;

		msleep(1);
	}

	if (ret)
		drm_err(&i915->drm, "TC cold %sblock failed\n",
			block ? "" : "un");
	else
		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
			    block ? "" : "un");
}
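/*
 * Illustrative note: the TGL TC-cold "power well" is purely a PCODE
 * handshake; the enable/disable hooks below map to block and unblock
 * requests, each retried up to three times on failure:
 *
 *	tgl_tc_cold_request(i915, true);   // block TC-cold
 *	tgl_tc_cold_request(i915, false);  // allow TC-cold again
 */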
"" : "un"); 1753 } 1754 1755 static void 1756 tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, 1757 struct i915_power_well *power_well) 1758 { 1759 tgl_tc_cold_request(i915, true); 1760 } 1761 1762 static void 1763 tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, 1764 struct i915_power_well *power_well) 1765 { 1766 tgl_tc_cold_request(i915, false); 1767 } 1768 1769 static void 1770 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, 1771 struct i915_power_well *power_well) 1772 { 1773 if (intel_power_well_refcount(power_well) > 0) 1774 tgl_tc_cold_off_power_well_enable(i915, power_well); 1775 else 1776 tgl_tc_cold_off_power_well_disable(i915, power_well); 1777 } 1778 1779 static bool 1780 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, 1781 struct i915_power_well *power_well) 1782 { 1783 /* 1784 * Not the correctly implementation but there is no way to just read it 1785 * from PCODE, so returning count to avoid state mismatch errors 1786 */ 1787 return intel_power_well_refcount(power_well); 1788 } 1789 1790 static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv, 1791 struct i915_power_well *power_well) 1792 { 1793 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; 1794 1795 intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch), 1796 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, 1797 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST); 1798 1799 /* 1800 * The power status flag cannot be used to determine whether aux 1801 * power wells have finished powering up. Instead we're 1802 * expected to just wait a fixed 600us after raising the request 1803 * bit. 1804 */ 1805 usleep_range(600, 1200); 1806 } 1807 1808 static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv, 1809 struct i915_power_well *power_well) 1810 { 1811 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; 1812 1813 intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch), 1814 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, 1815 0); 1816 usleep_range(10, 30); 1817 } 1818 1819 static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv, 1820 struct i915_power_well *power_well) 1821 { 1822 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; 1823 1824 return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch)) & 1825 XELPDP_DP_AUX_CH_CTL_POWER_STATUS; 1826 } 1827 1828 const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 1829 .sync_hw = i9xx_power_well_sync_hw_noop, 1830 .enable = i9xx_always_on_power_well_noop, 1831 .disable = i9xx_always_on_power_well_noop, 1832 .is_enabled = i9xx_always_on_power_well_enabled, 1833 }; 1834 1835 const struct i915_power_well_ops chv_pipe_power_well_ops = { 1836 .sync_hw = chv_pipe_power_well_sync_hw, 1837 .enable = chv_pipe_power_well_enable, 1838 .disable = chv_pipe_power_well_disable, 1839 .is_enabled = chv_pipe_power_well_enabled, 1840 }; 1841 1842 const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { 1843 .sync_hw = i9xx_power_well_sync_hw_noop, 1844 .enable = chv_dpio_cmn_power_well_enable, 1845 .disable = chv_dpio_cmn_power_well_disable, 1846 .is_enabled = vlv_power_well_enabled, 1847 }; 1848 1849 const struct i915_power_well_ops i830_pipes_power_well_ops = { 1850 .sync_hw = i830_pipes_power_well_sync_hw, 1851 .enable = i830_pipes_power_well_enable, 1852 .disable = i830_pipes_power_well_disable, 1853 .is_enabled = i830_pipes_power_well_enabled, 1854 }; 1855 1856 static const struct i915_power_well_regs 
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios = HSW_PWR_WELL_CTL1,
	.driver = HSW_PWR_WELL_CTL2,
	.kvmr = HSW_PWR_WELL_CTL3,
	.debug = HSW_PWR_WELL_CTL4,
};

const struct i915_power_well_ops hsw_power_well_ops = {
	.regs = &hsw_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_AUX1,
	.driver = ICL_PWR_WELL_CTL_AUX2,
	.debug = ICL_PWR_WELL_CTL_AUX4,
};

const struct i915_power_well_ops icl_aux_power_well_ops = {
	.regs = &icl_aux_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_aux_power_well_enable,
	.disable = icl_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_DDI1,
	.driver = ICL_PWR_WELL_CTL_DDI2,
	.debug = ICL_PWR_WELL_CTL_DDI4,
};

const struct i915_power_well_ops icl_ddi_power_well_ops = {
	.regs = &icl_ddi_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

const struct i915_power_well_ops tgl_tc_cold_off_ops = {
	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
	.enable = tgl_tc_cold_off_power_well_enable,
	.disable = tgl_tc_cold_off_power_well_disable,
	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};

const struct i915_power_well_ops xelpdp_aux_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = xelpdp_aux_power_well_enable,
	.disable = xelpdp_aux_power_well_disable,
	.is_enabled = xelpdp_aux_power_well_enabled,
};
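/*
 * Usage sketch (illustrative only): callers never invoke these ops
 * directly; they take a reference on a power domain and the
 * domain-to-well mapping does the rest. E.g. for the TC cold off well
 * backing tgl_tc_cold_off_ops, assuming it is attached to a domain
 * named POWER_DOMAIN_TC_COLD_OFF:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_TC_COLD_OFF);
 *	// ... TypeC PHY accesses are safe while TC cold is blocked ...
 *	intel_display_power_put(i915, POWER_DOMAIN_TC_COLD_OFF, wakeref);
 */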