/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "vlv_sideband.h"

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

struct i915_power_well_regs {
	i915_reg_t bios;
	i915_reg_t driver;
	i915_reg_t kvmr;
	i915_reg_t debug;
};

/* Power well structure for haswell */
struct i915_power_well_desc {
	const char *name;
	bool always_on;
	u64 domains;
	/* unique identifier for this power well */
	enum i915_power_well_id id;
	/*
	 * Arbitrary data associated with this power well. Platform and power
	 * well specific.
	 */
	union {
		struct {
			/*
			 * request/status flag index in the PUNIT power well
			 * control/status registers.
			 */
			u8 idx;
		} vlv;
		struct {
			enum dpio_phy phy;
		} bxt;
		struct {
			const struct i915_power_well_regs *regs;
			/*
			 * request/status flag index in the power well
			 * control/status registers.
			 */
			u8 idx;
			/* Mask of pipes whose IRQ logic is backed by the pw */
			u8 irq_pipe_mask;
			/*
			 * Instead of waiting for the status bit to ack enables,
			 * just wait a specific amount of time and then consider
			 * the well enabled.
			 */
			u16 fixed_enable_delay;
			/* The pw is backing the VGA functionality */
			bool has_vga:1;
			bool has_fuses:1;
			/*
			 * The pw is for an ICL+ TypeC PHY port in
			 * Thunderbolt mode.
			 */
			bool is_tc_tbt:1;
		} hsw;
	};
	const struct i915_power_well_ops *ops;
};

struct i915_power_well {
	const struct i915_power_well_desc *desc;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
};
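/*
 * Illustrative sketch, not part of the original driver: a minimal
 * descriptor for an always-on well could look roughly like the below,
 * assuming the i9xx always-on ops defined later in this file and the
 * POWER_DOMAIN_MASK / DISP_PW_ID_NONE definitions used by the platform
 * power well tables:
 *
 *	static const struct i915_power_well_desc example_always_on_well = {
 *		.name = "always-on",
 *		.always_on = true,
 *		.domains = POWER_DOMAIN_MASK,
 *		.id = DISP_PW_ID_NONE,
 *		.ops = &i9xx_always_on_power_well_ops,
 *	};
 */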
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_G_LANES:
		return "PORT_DDI_G_LANES";
	case POWER_DOMAIN_PORT_DDI_H_LANES:
		return "PORT_DDI_H_LANES";
	case POWER_DOMAIN_PORT_DDI_I_LANES:
		return "PORT_DDI_I_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_G_IO:
		return "PORT_DDI_G_IO";
	case POWER_DOMAIN_PORT_DDI_H_IO:
		return "PORT_DDI_H_IO";
	case POWER_DOMAIN_PORT_DDI_I_IO:
		return "PORT_DDI_I_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO_MMIO:
		return "AUDIO_MMIO";
	case POWER_DOMAIN_AUDIO_PLAYBACK:
		return "AUDIO_PLAYBACK";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_G:
		return "AUX_G";
	case POWER_DOMAIN_AUX_H:
		return "AUX_H";
	case POWER_DOMAIN_AUX_I:
		return "AUX_I";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_C_TBT:
		return "AUX_C_TBT";
	case POWER_DOMAIN_AUX_D_TBT:
		return "AUX_D_TBT";
	case POWER_DOMAIN_AUX_E_TBT:
		return "AUX_E_TBT";
	case POWER_DOMAIN_AUX_F_TBT:
		return "AUX_F_TBT";
	case POWER_DOMAIN_AUX_G_TBT:
		return "AUX_G_TBT";
	case POWER_DOMAIN_AUX_H_TBT:
		return "AUX_H_TBT";
	case POWER_DOMAIN_AUX_I_TBT:
		return "AUX_I_TBT";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DC_OFF:
		return "DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
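/*
 * A power domain is satisfied by every power well whose ->domains mask
 * contains it. As a sketch (assuming the for_each_power_domain_well()
 * iterator whose _reverse variant is used below), grabbing a domain
 * amounts to taking a reference on each backing well:
 *
 *	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
 *		intel_power_well_get(dev_priv, power_well);
 */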
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	drm_WARN(&dev_priv->drm, !power_well->count,
		 "Use count on power well %s is already zero",
		 power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}
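/*
 * Illustrative usage sketch, not from this file: normal code paths take an
 * explicit domain reference around register access instead of checking the
 * state, using the intel_display_power_get()/put() API declared in
 * intel_display_power.h:
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_PIPE_A);
 *	... access PIPE_A registers ...
 *	intel_display_power_put(i915, POWER_DOMAIN_PIPE_A, wakeref);
 */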
/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
						 ICL_AUX_PW_TO_CH(pw_idx);
}

static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
		       enum aux_ch aux_ch)
{
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);
		if (!dig_port)
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	return dig_port;
}

static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
				  const struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);

	return intel_port_to_phy(i915, dig_port->base.port);
}
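/*
 * Worked example for the mapping above: a combo AUX well with
 * pw_idx == ICL_PW_CTL_IDX_AUX_B resolves to AUX_CH_B via
 * ICL_AUX_PW_TO_CH(), while a TBT AUX well with
 * pw_idx == ICL_PW_CTL_IDX_AUX_TBT1 resolves to AUX_CH_C, the first
 * TBT-capable channel, via ICL_TBT_AUX_PW_TO_CH().
 */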
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	int enable_delay = power_well->desc->hsw.fixed_enable_delay;

	/*
	 * For some power wells we're not supposed to watch the status bit for
	 * an ack, but rather just wait a fixed amount of time and then
	 * proceed. This is only used on DG2.
	 */
	if (IS_DG2(dev_priv) && enable_delay) {
		usleep_range(enable_delay, 2 * enable_delay);
		return;
	}

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
			    power_well->desc->name);

		drm_WARN_ON(&dev_priv->drm, !timeout_expected);
	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;

	return ret;
}

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    power_well->desc->name,
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);

		/* Wa_16013190616:adlp */
		if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
			intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);

		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ?
		     ICL_PW_CTL_IDX_TO_PG(pw_idx) :
		     SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(dev_priv, pg);
	}

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (DISPLAY_VER(dev_priv) < 12) {
		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
			       val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
		       val & ~ICL_LANE_ENABLE_AUX);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);

	return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
		return;

	if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		return;

	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void icl_tc_cold_exit(struct drm_i915_private *i915)
{
	int ret, tries = 0;

	while (1) {
		ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0,
					      250, 1);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		msleep(1);
	}

	/* Spec states that TC cold exit can take up to 1ms to complete */
	if (!ret)
		msleep(1);

	/* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
		    "succeeded");
}

static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
	bool timeout_expected;
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (is_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));

	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down, or when we
	 * need to enable AUX on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
	if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		icl_tc_cold_exit(dev_priv);

	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);

	if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
			       HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting TC uC health\n");
	}
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	hsw_power_well_disable(dev_priv, power_well);
}

static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
			  struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_enable(dev_priv,
							   power_well);
	else
		return hsw_power_well_enable(dev_priv, power_well);
}

static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_disable(dev_priv,
							    power_well);
	else
		return hsw_power_well_disable(dev_priv, power_well);
}
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(dev_priv, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(dev_priv, regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(dev_priv, DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning the old value. Make sure
	 * the write really sticks by re-reading it enough times, and force
	 * a rewrite until we are confident that the state is exactly what
	 * we want.
	 */
	do {
		v = intel_de_read(dev_priv, DC_STATE_EN);

		if (v != state) {
			intel_de_write(dev_priv, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(&dev_priv->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the time a single retry is enough, so avoid log spam */
	if (rewrites > 1)
		drm_dbg_kms(&dev_priv->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (DISPLAY_VER(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 |
			DC_STATE_EN_DC9;
	else if (DISPLAY_VER(dev_priv) == 11)
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_DISPLAY(dev_priv))
		return;

	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);

	drm_dbg_kms(&dev_priv->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    dev_priv->dmc.dc_state, val);
	dev_priv->dmc.dc_state = val;
}
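/*
 * For example, per gen9_dc_mask() above, on a display version 12 platform
 * the writable bits are DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_DC3CO |
 * DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9, while a SKL-like gen9 big core
 * platform only exposes DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6.
 */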
/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * the HW actually entering it. Disabling a deeper power state is synchronous:
 * for instance setting %DC_STATE_DISABLE won't complete until all HW
 * resources are turned back on and register state is restored. This is
 * guaranteed by the MMIO write to DC_STATE_EN blocking until the state is
 * restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     state & ~dev_priv->dmc.allowed_dc_mask))
		state &= dev_priv->dmc.allowed_dc_mask;

	val = intel_de_read(dev_priv, DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->dmc.dc_state)
		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			dev_priv->dmc.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->dmc.dc_state = val & mask;
}

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	static const u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}

static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
	val = intel_de_read(dev_priv, DC_STATE_EN);
	val &= ~DC_STATE_DC3CO_STATUS;
	intel_de_write(dev_priv, DC_STATE_EN, val);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/* Delay of 200us for DC3CO exit time (Bspec 49196) */
	usleep_range(200, 210);
}

static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_pps_reset_all(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      !intel_de_read(dev_priv,
				     DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
		      "DMC program storage start is NULL\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
		      "DMC SSP Base Not fine\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
		      "DMC HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	drm_WARN(&dev_priv->drm, 1,
		 "Power well %d not defined for this platform\n",
		 power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}
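/*
 * Illustrative usage (sketch): wells are looked up by their stable ID,
 * e.g. the DC-off well used by intel_display_power_set_target_dc_state()
 * below:
 *
 *	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
 */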
/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well's target_dc_state; based on this
 * target_dc_state the "DC off" power well will enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->dmc.target_dc_state)
		goto unlock;

	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
							   power_well);
	/*
	 * If the DC off power well is disabled, toggle it (enable, then
	 * disable) so that the new target DC state takes effect.
	 */
	if (!dc_off_enabled)
		power_well->desc->ops->enable(dev_priv, power_well);

	dev_priv->dmc.target_dc_state = state;

	if (!dc_off_enabled)
		power_well->desc->ops->disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (DISPLAY_VER(dev_priv) == 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(&dev_priv->drm,
		      intel_display_power_well_is_enabled(dev_priv, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_dmc_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		      "Backlight is not disabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_dmc_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
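/*
 * Summary of the asserts above: DC5 entry requires the highest dynamic
 * power well (PW2, or PW3 on display version 12) to be off, a runtime PM
 * wakeref to be held and the DMC firmware to be loaded; DC6 entry checks
 * that the utility pin (backlight) output is disabled and that the DMC
 * firmware is loaded.
 */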
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(dev_priv, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(dev_priv, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(dev_priv, regs->driver, drv_req | mask);
		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;

	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
}

static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = {};

	if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A; the HW context for
		 * the other combo PHYs is lost after DC transitions, so we
		 * need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!intel_dmc_has_payload(dev_priv))
		return;

	switch (dev_priv->dmc.target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ?
		PUNIT_PWRGT_PWR_ON(pw_idx) :
		PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	intel_de_write(dev_priv, DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}
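/*
 * Note on the RAWCLK_FREQ_VLV write above: rawclk_freq is kept in kHz in
 * the runtime info (an assumption based on the /1000 conversion here), so
 * e.g. a 200000 kHz raw clock would program the register with 200 (MHz).
 */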
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * parts of the HW/SW state that will be initialized explicitly
	 * anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);
	intel_hpd_poll_disable(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_pps_reset_all(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_enable(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *       be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		drm_err(&dev_priv->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d did not power up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
	       DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
1780 */ 1781 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); 1782 tmp |= DPIO_CL2_LDOFUSE_PWRENB; 1783 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); 1784 } 1785 1786 vlv_dpio_put(dev_priv); 1787 1788 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); 1789 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1790 dev_priv->chv_phy_control); 1791 1792 drm_dbg_kms(&dev_priv->drm, 1793 "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1794 phy, dev_priv->chv_phy_control); 1795 1796 assert_chv_phy_status(dev_priv); 1797 } 1798 1799 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1800 struct i915_power_well *power_well) 1801 { 1802 enum dpio_phy phy; 1803 1804 drm_WARN_ON_ONCE(&dev_priv->drm, 1805 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && 1806 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); 1807 1808 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1809 phy = DPIO_PHY0; 1810 assert_pll_disabled(dev_priv, PIPE_A); 1811 assert_pll_disabled(dev_priv, PIPE_B); 1812 } else { 1813 phy = DPIO_PHY1; 1814 assert_pll_disabled(dev_priv, PIPE_C); 1815 } 1816 1817 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); 1818 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1819 dev_priv->chv_phy_control); 1820 1821 vlv_set_power_well(dev_priv, power_well, false); 1822 1823 drm_dbg_kms(&dev_priv->drm, 1824 "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1825 phy, dev_priv->chv_phy_control); 1826 1827 /* PHY is fully reset now, so we can enable the PHY state asserts */ 1828 dev_priv->chv_phy_assert[phy] = true; 1829 1830 assert_chv_phy_status(dev_priv); 1831 } 1832 1833 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1834 enum dpio_channel ch, bool override, unsigned int mask) 1835 { 1836 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; 1837 u32 reg, val, expected, actual; 1838 1839 /* 1840 * The BIOS can leave the PHY in some weird state 1841 * where it doesn't fully power down some parts. 1842 * Disable the asserts until the PHY has been fully 1843 * reset (i.e. the power well has been disabled at 1844 * least once). 1845 */ 1846 if (!dev_priv->chv_phy_assert[phy]) 1847 return; 1848 1849 if (ch == DPIO_CH0) 1850 reg = _CHV_CMN_DW0_CH0; 1851 else 1852 reg = _CHV_CMN_DW6_CH1; 1853 1854 vlv_dpio_get(dev_priv); 1855 val = vlv_dpio_read(dev_priv, pipe, reg); 1856 vlv_dpio_put(dev_priv); 1857 1858 /* 1859 * This assumes !override is only used when the port is disabled. 1860 * All lanes should power down even without the override when 1861 * the port is disabled. 1862 */ 1863 if (!override || mask == 0xf) { 1864 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1865 /* 1866 * If CH1 common lane is not active anymore 1867 * (e.g. for pipe B DPLL) the entire channel will 1868 * shut down, which causes the common lane registers 1869 * to read as 0. That means we can't actually check 1870 * the lane power down status bits, but as the entire 1871 * register reads as 0 it's a good indication that the 1872 * channel is indeed entirely powered down.
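* In that case the DPIO_ALLDL_POWERDOWN and DPIO_ANYDL_POWERDOWN bits also read back as 0, so the expected value is forced to 0 below to keep the check consistent.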
1873 */ 1874 if (ch == DPIO_CH1 && val == 0) 1875 expected = 0; 1876 } else if (mask != 0x0) { 1877 expected = DPIO_ANYDL_POWERDOWN; 1878 } else { 1879 expected = 0; 1880 } 1881 1882 if (ch == DPIO_CH0) 1883 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; 1884 else 1885 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; 1886 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1887 1888 drm_WARN(&dev_priv->drm, actual != expected, 1889 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", 1890 !!(actual & DPIO_ALLDL_POWERDOWN), 1891 !!(actual & DPIO_ANYDL_POWERDOWN), 1892 !!(expected & DPIO_ALLDL_POWERDOWN), 1893 !!(expected & DPIO_ANYDL_POWERDOWN), 1894 reg, val); 1895 } 1896 1897 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1898 enum dpio_channel ch, bool override) 1899 { 1900 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1901 bool was_override; 1902 1903 mutex_lock(&power_domains->lock); 1904 1905 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1906 1907 if (override == was_override) 1908 goto out; 1909 1910 if (override) 1911 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1912 else 1913 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1914 1915 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1916 dev_priv->chv_phy_control); 1917 1918 drm_dbg_kms(&dev_priv->drm, 1919 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", 1920 phy, ch, dev_priv->chv_phy_control); 1921 1922 assert_chv_phy_status(dev_priv); 1923 1924 out: 1925 mutex_unlock(&power_domains->lock); 1926 1927 return was_override; 1928 } 1929 1930 void chv_phy_powergate_lanes(struct intel_encoder *encoder, 1931 bool override, unsigned int mask) 1932 { 1933 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1934 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1935 enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); 1936 enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); 1937 1938 mutex_lock(&power_domains->lock); 1939 1940 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); 1941 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); 1942 1943 if (override) 1944 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1945 else 1946 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1947 1948 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1949 dev_priv->chv_phy_control); 1950 1951 drm_dbg_kms(&dev_priv->drm, 1952 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", 1953 phy, ch, mask, dev_priv->chv_phy_control); 1954 1955 assert_chv_phy_status(dev_priv); 1956 1957 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); 1958 1959 mutex_unlock(&power_domains->lock); 1960 } 1961 1962 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, 1963 struct i915_power_well *power_well) 1964 { 1965 enum pipe pipe = PIPE_A; 1966 bool enabled; 1967 u32 state, ctrl; 1968 1969 vlv_punit_get(dev_priv); 1970 1971 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); 1972 /* 1973 * We only ever set the power-on and power-gate states, anything 1974 * else is unexpected. 
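* (Any other encoding read back from the PUNIT trips the WARN below.)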
1975 */ 1976 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && 1977 state != DP_SSS_PWR_GATE(pipe)); 1978 enabled = state == DP_SSS_PWR_ON(pipe); 1979 1980 /* 1981 * A transient state at this point would mean some unexpected party 1982 * is poking at the power controls too. 1983 */ 1984 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); 1985 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); 1986 1987 vlv_punit_put(dev_priv); 1988 1989 return enabled; 1990 } 1991 1992 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, 1993 struct i915_power_well *power_well, 1994 bool enable) 1995 { 1996 enum pipe pipe = PIPE_A; 1997 u32 state; 1998 u32 ctrl; 1999 2000 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); 2001 2002 vlv_punit_get(dev_priv); 2003 2004 #define COND \ 2005 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) 2006 2007 if (COND) 2008 goto out; 2009 2010 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 2011 ctrl &= ~DP_SSC_MASK(pipe); 2012 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); 2013 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); 2014 2015 if (wait_for(COND, 100)) 2016 drm_err(&dev_priv->drm, 2017 "timeout setting power well state %08x (%08x)\n", 2018 state, 2019 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); 2020 2021 #undef COND 2022 2023 out: 2024 vlv_punit_put(dev_priv); 2025 } 2026 2027 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, 2028 struct i915_power_well *power_well) 2029 { 2030 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 2031 dev_priv->chv_phy_control); 2032 } 2033 2034 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, 2035 struct i915_power_well *power_well) 2036 { 2037 chv_set_pipe_power_well(dev_priv, power_well, true); 2038 2039 vlv_display_power_well_init(dev_priv); 2040 } 2041 2042 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, 2043 struct i915_power_well *power_well) 2044 { 2045 vlv_display_power_well_deinit(dev_priv); 2046 2047 chv_set_pipe_power_well(dev_priv, power_well, false); 2048 } 2049 2050 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) 2051 { 2052 return power_domains->async_put_domains[0] | 2053 power_domains->async_put_domains[1]; 2054 } 2055 2056 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2057 2058 static bool 2059 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 2060 { 2061 struct drm_i915_private *i915 = container_of(power_domains, 2062 struct drm_i915_private, 2063 power_domains); 2064 return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] & 2065 power_domains->async_put_domains[1]); 2066 } 2067 2068 static bool 2069 __async_put_domains_state_ok(struct i915_power_domains *power_domains) 2070 { 2071 struct drm_i915_private *i915 = container_of(power_domains, 2072 struct drm_i915_private, 2073 power_domains); 2074 enum intel_display_power_domain domain; 2075 bool err = false; 2076 2077 err |= !assert_async_put_domain_masks_disjoint(power_domains); 2078 err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref != 2079 !!__async_put_domains_mask(power_domains)); 2080 2081 for_each_power_domain(domain, __async_put_domains_mask(power_domains)) 2082 err |= drm_WARN_ON(&i915->drm, 2083 power_domains->domain_use_count[domain] != 1); 2084 2085 return !err; 2086 } 2087 2088 static void print_power_domains(struct i915_power_domains *power_domains, 2089 const char *prefix, u64 
mask) 2090 { 2091 struct drm_i915_private *i915 = container_of(power_domains, 2092 struct drm_i915_private, 2093 power_domains); 2094 enum intel_display_power_domain domain; 2095 2096 drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask)); 2097 for_each_power_domain(domain, mask) 2098 drm_dbg(&i915->drm, "%s use_count %d\n", 2099 intel_display_power_domain_str(domain), 2100 power_domains->domain_use_count[domain]); 2101 } 2102 2103 static void 2104 print_async_put_domains_state(struct i915_power_domains *power_domains) 2105 { 2106 struct drm_i915_private *i915 = container_of(power_domains, 2107 struct drm_i915_private, 2108 power_domains); 2109 2110 drm_dbg(&i915->drm, "async_put_wakeref %u\n", 2111 power_domains->async_put_wakeref); 2112 2113 print_power_domains(power_domains, "async_put_domains[0]", 2114 power_domains->async_put_domains[0]); 2115 print_power_domains(power_domains, "async_put_domains[1]", 2116 power_domains->async_put_domains[1]); 2117 } 2118 2119 static void 2120 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2121 { 2122 if (!__async_put_domains_state_ok(power_domains)) 2123 print_async_put_domains_state(power_domains); 2124 } 2125 2126 #else 2127 2128 static void 2129 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 2130 { 2131 } 2132 2133 static void 2134 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2135 { 2136 } 2137 2138 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ 2139 2140 static u64 async_put_domains_mask(struct i915_power_domains *power_domains) 2141 { 2142 assert_async_put_domain_masks_disjoint(power_domains); 2143 2144 return __async_put_domains_mask(power_domains); 2145 } 2146 2147 static void 2148 async_put_domains_clear_domain(struct i915_power_domains *power_domains, 2149 enum intel_display_power_domain domain) 2150 { 2151 assert_async_put_domain_masks_disjoint(power_domains); 2152 2153 power_domains->async_put_domains[0] &= ~BIT_ULL(domain); 2154 power_domains->async_put_domains[1] &= ~BIT_ULL(domain); 2155 } 2156 2157 static bool 2158 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, 2159 enum intel_display_power_domain domain) 2160 { 2161 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2162 bool ret = false; 2163 2164 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) 2165 goto out_verify; 2166 2167 async_put_domains_clear_domain(power_domains, domain); 2168 2169 ret = true; 2170 2171 if (async_put_domains_mask(power_domains)) 2172 goto out_verify; 2173 2174 cancel_delayed_work(&power_domains->async_put_work); 2175 intel_runtime_pm_put_raw(&dev_priv->runtime_pm, 2176 fetch_and_zero(&power_domains->async_put_wakeref)); 2177 out_verify: 2178 verify_async_put_domains_state(power_domains); 2179 2180 return ret; 2181 } 2182 2183 static void 2184 __intel_display_power_get_domain(struct drm_i915_private *dev_priv, 2185 enum intel_display_power_domain domain) 2186 { 2187 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2188 struct i915_power_well *power_well; 2189 2190 if (intel_display_power_grab_async_put_ref(dev_priv, domain)) 2191 return; 2192 2193 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) 2194 intel_power_well_get(dev_priv, power_well); 2195 2196 power_domains->domain_use_count[domain]++; 2197 } 2198 2199 /** 2200 * intel_display_power_get - grab a power domain reference 2201 * @dev_priv: i915 device instance 2202 * @domain: power domain to reference 2203 * 2204 * 
This function grabs a power domain reference for @domain and ensures that the 2205 * power domain and all its parents are powered up. Therefore users should only 2206 * grab a reference to the innermost power domain they need. 2207 * 2208 * Any power domain reference obtained by this function must have a symmetric 2209 * call to intel_display_power_put() to release the reference again. 2210 */ 2211 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, 2212 enum intel_display_power_domain domain) 2213 { 2214 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2215 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 2216 2217 mutex_lock(&power_domains->lock); 2218 __intel_display_power_get_domain(dev_priv, domain); 2219 mutex_unlock(&power_domains->lock); 2220 2221 return wakeref; 2222 } 2223 2224 /** 2225 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain 2226 * @dev_priv: i915 device instance 2227 * @domain: power domain to reference 2228 * 2229 * This function grabs a power domain reference for @domain only if the 2230 * domain is already enabled, and keeps the domain enabled for as long as 2231 * the reference is held. It does not power the domain up itself: if the 2232 * domain (or the device) is powered down, no reference is taken and 0 is 2233 * returned. 2234 * 2235 * Any power domain reference obtained by this function must have a symmetric 2236 * call to intel_display_power_put() to release the reference again. 2237 */ 2238 intel_wakeref_t 2239 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, 2240 enum intel_display_power_domain domain) 2241 { 2242 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2243 intel_wakeref_t wakeref; 2244 bool is_enabled; 2245 2246 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); 2247 if (!wakeref) 2248 return 0; 2249 2250 mutex_lock(&power_domains->lock); 2251 2252 if (__intel_display_power_is_enabled(dev_priv, domain)) { 2253 __intel_display_power_get_domain(dev_priv, domain); 2254 is_enabled = true; 2255 } else { 2256 is_enabled = false; 2257 } 2258 2259 mutex_unlock(&power_domains->lock); 2260 2261 if (!is_enabled) { 2262 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2263 wakeref = 0; 2264 } 2265 2266 return wakeref; 2267 } 2268 2269 static void 2270 __intel_display_power_put_domain(struct drm_i915_private *dev_priv, 2271 enum intel_display_power_domain domain) 2272 { 2273 struct i915_power_domains *power_domains; 2274 struct i915_power_well *power_well; 2275 const char *name = intel_display_power_domain_str(domain); 2276 2277 power_domains = &dev_priv->power_domains; 2278 2279 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], 2280 "Use count on domain %s is already zero\n", 2281 name); 2282 drm_WARN(&dev_priv->drm, 2283 async_put_domains_mask(power_domains) & BIT_ULL(domain), 2284 "Async disabling of domain %s is pending\n", 2285 name); 2286 2287 power_domains->domain_use_count[domain]--; 2288 2289 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) 2290 intel_power_well_put(dev_priv, power_well); 2291 } 2292 2293 static void __intel_display_power_put(struct drm_i915_private *dev_priv, 2294 enum intel_display_power_domain domain) 2295 { 2296 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2297 2298 mutex_lock(&power_domains->lock); 2299 __intel_display_power_put_domain(dev_priv, domain); 2300 mutex_unlock(&power_domains->lock); 2301 } 2302 2303 static void 2304 queue_async_put_domains_work(struct i915_power_domains
*power_domains, 2303 intel_wakeref_t wakeref) 2304 { 2305 struct drm_i915_private *i915 = container_of(power_domains, 2306 struct drm_i915_private, 2307 power_domains); 2308 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); 2309 power_domains->async_put_wakeref = wakeref; 2310 drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, 2311 &power_domains->async_put_work, 2312 msecs_to_jiffies(100))); 2313 } 2314 2315 static void 2316 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) 2317 { 2318 struct drm_i915_private *dev_priv = 2319 container_of(power_domains, struct drm_i915_private, 2320 power_domains); 2321 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 2322 enum intel_display_power_domain domain; 2323 intel_wakeref_t wakeref; 2324 2325 /* 2326 * The caller must already hold a raw wakeref; upgrade it to a proper 2327 * wakeref to make the state checker happy about the HW access during 2328 * power well disabling. 2329 */ 2330 assert_rpm_raw_wakeref_held(rpm); 2331 wakeref = intel_runtime_pm_get(rpm); 2332 2333 for_each_power_domain(domain, mask) { 2334 /* Clear before put, so put's sanity check is happy. */ 2335 async_put_domains_clear_domain(power_domains, domain); 2336 __intel_display_power_put_domain(dev_priv, domain); 2337 } 2338 2339 intel_runtime_pm_put(rpm, wakeref); 2340 } 2341 2342 static void 2343 intel_display_power_put_async_work(struct work_struct *work) 2344 { 2345 struct drm_i915_private *dev_priv = 2346 container_of(work, struct drm_i915_private, 2347 power_domains.async_put_work.work); 2348 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2349 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 2350 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); 2351 intel_wakeref_t old_work_wakeref = 0; 2352 2353 mutex_lock(&power_domains->lock); 2354 2355 /* 2356 * Bail out if all the domain refs pending to be released were grabbed 2357 * by subsequent gets or a flush_work. 2358 */ 2359 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2360 if (!old_work_wakeref) 2361 goto out_verify; 2362 2363 release_async_put_domains(power_domains, 2364 power_domains->async_put_domains[0]); 2365 2366 /* Requeue the work if more domains were async put meanwhile. */ 2367 if (power_domains->async_put_domains[1]) { 2368 power_domains->async_put_domains[0] = 2369 fetch_and_zero(&power_domains->async_put_domains[1]); 2370 queue_async_put_domains_work(power_domains, 2371 fetch_and_zero(&new_work_wakeref)); 2372 } else { 2373 /* 2374 * Cancel the work that got queued after this one got dequeued, 2375 * since here we released the corresponding async-put reference. 2376 */ 2377 cancel_delayed_work(&power_domains->async_put_work); 2378 } 2379 2380 out_verify: 2381 verify_async_put_domains_state(power_domains); 2382 2383 mutex_unlock(&power_domains->lock); 2384 2385 if (old_work_wakeref) 2386 intel_runtime_pm_put_raw(rpm, old_work_wakeref); 2387 if (new_work_wakeref) 2388 intel_runtime_pm_put_raw(rpm, new_work_wakeref); 2389 } 2390 2391 /** 2392 * intel_display_power_put_async - release a power domain reference asynchronously 2393 * @i915: i915 device instance 2394 * @domain: power domain to reference 2395 * @wakeref: wakeref acquired for the reference that is being released 2396 * 2397 * This function drops the power domain reference obtained by 2398 * intel_display_power_get*() and schedules a work to power down the 2399 * corresponding hardware block if this is the last reference.
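*
* The async variant exists for domains that are toggled at high frequency.
* A minimal usage sketch (illustrative; the call sites live outside this
* file):
*
*	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
*	... access the hardware ...
*	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
*
* The domain then stays powered for a grace period (the 100 ms delay used
* by queue_async_put_domains_work()), so an intel_display_power_get() that
* follows quickly can take over the pending reference without toggling the
* power well off and back on.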
2400 */ 2401 void __intel_display_power_put_async(struct drm_i915_private *i915, 2402 enum intel_display_power_domain domain, 2403 intel_wakeref_t wakeref) 2404 { 2405 struct i915_power_domains *power_domains = &i915->power_domains; 2406 struct intel_runtime_pm *rpm = &i915->runtime_pm; 2407 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); 2408 2409 mutex_lock(&power_domains->lock); 2410 2411 if (power_domains->domain_use_count[domain] > 1) { 2412 __intel_display_power_put_domain(i915, domain); 2413 2414 goto out_verify; 2415 } 2416 2417 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); 2418 2419 /* Let a pending work requeue itself or queue a new one. */ 2420 if (power_domains->async_put_wakeref) { 2421 power_domains->async_put_domains[1] |= BIT_ULL(domain); 2422 } else { 2423 power_domains->async_put_domains[0] |= BIT_ULL(domain); 2424 queue_async_put_domains_work(power_domains, 2425 fetch_and_zero(&work_wakeref)); 2426 } 2427 2428 out_verify: 2429 verify_async_put_domains_state(power_domains); 2430 2431 mutex_unlock(&power_domains->lock); 2432 2433 if (work_wakeref) 2434 intel_runtime_pm_put_raw(rpm, work_wakeref); 2435 2436 intel_runtime_pm_put(rpm, wakeref); 2437 } 2438 2439 /** 2440 * intel_display_power_flush_work - flushes the async display power disabling work 2441 * @i915: i915 device instance 2442 * 2443 * Flushes any pending work that was scheduled by a preceding 2444 * intel_display_power_put_async() call, completing the disabling of the 2445 * corresponding power domains. 2446 * 2447 * Note that the work handler function may still be running after this 2448 * function returns; to ensure that the work handler isn't running use 2449 * intel_display_power_flush_work_sync() instead. 2450 */ 2451 void intel_display_power_flush_work(struct drm_i915_private *i915) 2452 { 2453 struct i915_power_domains *power_domains = &i915->power_domains; 2454 intel_wakeref_t work_wakeref; 2455 2456 mutex_lock(&power_domains->lock); 2457 2458 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2459 if (!work_wakeref) 2460 goto out_verify; 2461 2462 release_async_put_domains(power_domains, 2463 async_put_domains_mask(power_domains)); 2464 cancel_delayed_work(&power_domains->async_put_work); 2465 2466 out_verify: 2467 verify_async_put_domains_state(power_domains); 2468 2469 mutex_unlock(&power_domains->lock); 2470 2471 if (work_wakeref) 2472 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); 2473 } 2474 2475 /** 2476 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work 2477 * @i915: i915 device instance 2478 * 2479 * Like intel_display_power_flush_work(), but also ensure that the work 2480 * handler function is not running any more when this function returns. 
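* This is the variant intended for teardown paths, where the delayed work
* item must not be left running once the structures it references are
* freed.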
2481 */ 2482 static void 2483 intel_display_power_flush_work_sync(struct drm_i915_private *i915) 2484 { 2485 struct i915_power_domains *power_domains = &i915->power_domains; 2486 2487 intel_display_power_flush_work(i915); 2488 cancel_delayed_work_sync(&power_domains->async_put_work); 2489 2490 verify_async_put_domains_state(power_domains); 2491 2492 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); 2493 } 2494 2495 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2496 /** 2497 * intel_display_power_put - release a power domain reference 2498 * @dev_priv: i915 device instance 2499 * @domain: power domain to reference 2500 * @wakeref: wakeref acquired for the reference that is being released 2501 * 2502 * This function drops the power domain reference obtained by 2503 * intel_display_power_get() and might power down the corresponding hardware 2504 * block right away if this is the last reference. 2505 */ 2506 void intel_display_power_put(struct drm_i915_private *dev_priv, 2507 enum intel_display_power_domain domain, 2508 intel_wakeref_t wakeref) 2509 { 2510 __intel_display_power_put(dev_priv, domain); 2511 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2512 } 2513 #else 2514 /** 2515 * intel_display_power_put_unchecked - release an unchecked power domain reference 2516 * @dev_priv: i915 device instance 2517 * @domain: power domain to reference 2518 * 2519 * This function drops the power domain reference obtained by 2520 * intel_display_power_get() and might power down the corresponding hardware 2521 * block right away if this is the last reference. 2522 * 2523 * This function is only for the power domain code's internal use to suppress wakeref 2524 * tracking when the corresponding debug kconfig option is disabled; it should 2525 * not be used otherwise.
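*
* (With that option disabled, intel_display_power_put() is expected to
* resolve to this untracked variant, so no other code should need to call
* it directly.)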
2526 */ 2527 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 2528 enum intel_display_power_domain domain) 2529 { 2530 __intel_display_power_put(dev_priv, domain); 2531 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); 2532 } 2533 #endif 2534 2535 void 2536 intel_display_power_get_in_set(struct drm_i915_private *i915, 2537 struct intel_display_power_domain_set *power_domain_set, 2538 enum intel_display_power_domain domain) 2539 { 2540 intel_wakeref_t __maybe_unused wf; 2541 2542 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2543 2544 wf = intel_display_power_get(i915, domain); 2545 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2546 power_domain_set->wakerefs[domain] = wf; 2547 #endif 2548 power_domain_set->mask |= BIT_ULL(domain); 2549 } 2550 2551 bool 2552 intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, 2553 struct intel_display_power_domain_set *power_domain_set, 2554 enum intel_display_power_domain domain) 2555 { 2556 intel_wakeref_t wf; 2557 2558 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2559 2560 wf = intel_display_power_get_if_enabled(i915, domain); 2561 if (!wf) 2562 return false; 2563 2564 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2565 power_domain_set->wakerefs[domain] = wf; 2566 #endif 2567 power_domain_set->mask |= BIT_ULL(domain); 2568 2569 return true; 2570 } 2571 2572 void 2573 intel_display_power_put_mask_in_set(struct drm_i915_private *i915, 2574 struct intel_display_power_domain_set *power_domain_set, 2575 u64 mask) 2576 { 2577 enum intel_display_power_domain domain; 2578 2579 drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask); 2580 2581 for_each_power_domain(domain, mask) { 2582 intel_wakeref_t __maybe_unused wf = -1; 2583 2584 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2585 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); 2586 #endif 2587 intel_display_power_put(i915, domain, wf); 2588 power_domain_set->mask &= ~BIT_ULL(domain); 2589 } 2590 } 2591 2592 #define I830_PIPES_POWER_DOMAINS ( \ 2593 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2594 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2595 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2596 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2597 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2598 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2599 BIT_ULL(POWER_DOMAIN_INIT)) 2600 2601 #define VLV_DISPLAY_POWER_DOMAINS ( \ 2602 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2603 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2604 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2605 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2606 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2607 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2608 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2609 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2610 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2611 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2612 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2613 BIT_ULL(POWER_DOMAIN_VGA) | \ 2614 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2615 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2616 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2617 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2618 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2619 BIT_ULL(POWER_DOMAIN_INIT)) 2620 2621 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2622 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2623 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2624 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2625 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2626 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2627 BIT_ULL(POWER_DOMAIN_INIT)) 2628 2629 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ 2630 
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2631 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2632 BIT_ULL(POWER_DOMAIN_INIT)) 2633 2634 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ 2635 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2636 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2637 BIT_ULL(POWER_DOMAIN_INIT)) 2638 2639 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ 2640 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2641 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2642 BIT_ULL(POWER_DOMAIN_INIT)) 2643 2644 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ 2645 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2646 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2647 BIT_ULL(POWER_DOMAIN_INIT)) 2648 2649 #define CHV_DISPLAY_POWER_DOMAINS ( \ 2650 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2651 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2652 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2653 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2654 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2655 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2656 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2657 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2658 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2659 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2660 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2661 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2662 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2663 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2664 BIT_ULL(POWER_DOMAIN_VGA) | \ 2665 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2666 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2667 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2668 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2669 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2670 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2671 BIT_ULL(POWER_DOMAIN_INIT)) 2672 2673 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2674 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2675 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2676 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2677 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2678 BIT_ULL(POWER_DOMAIN_INIT)) 2679 2680 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ 2681 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2682 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2683 BIT_ULL(POWER_DOMAIN_INIT)) 2684 2685 #define HSW_DISPLAY_POWER_DOMAINS ( \ 2686 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2687 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2688 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2689 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2690 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2691 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2692 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2693 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2694 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2695 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2696 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2697 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2698 BIT_ULL(POWER_DOMAIN_VGA) | \ 2699 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2700 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2701 BIT_ULL(POWER_DOMAIN_INIT)) 2702 2703 #define BDW_DISPLAY_POWER_DOMAINS ( \ 2704 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2705 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2706 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2707 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2708 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2709 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2710 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2711 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2712 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2713 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2714 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2715 BIT_ULL(POWER_DOMAIN_VGA) | \ 2716 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2717 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2718 BIT_ULL(POWER_DOMAIN_INIT)) 2719 2720 #define 
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2721 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2722 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2723 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2724 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2725 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2726 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2727 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2728 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2729 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2730 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2731 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2732 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2733 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2734 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2735 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2736 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2737 BIT_ULL(POWER_DOMAIN_VGA) | \ 2738 BIT_ULL(POWER_DOMAIN_INIT)) 2739 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ 2740 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2741 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ 2742 BIT_ULL(POWER_DOMAIN_INIT)) 2743 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2744 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2745 BIT_ULL(POWER_DOMAIN_INIT)) 2746 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2747 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2748 BIT_ULL(POWER_DOMAIN_INIT)) 2749 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ 2750 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2751 BIT_ULL(POWER_DOMAIN_INIT)) 2752 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2753 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2754 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2755 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2756 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2757 BIT_ULL(POWER_DOMAIN_INIT)) 2758 2759 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2760 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2761 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2762 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2763 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2764 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2765 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2766 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2767 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2768 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2769 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2770 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2771 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2772 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2773 BIT_ULL(POWER_DOMAIN_VGA) | \ 2774 BIT_ULL(POWER_DOMAIN_INIT)) 2775 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2776 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2777 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2778 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2779 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2780 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2781 BIT_ULL(POWER_DOMAIN_INIT)) 2782 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ 2783 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2784 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2785 BIT_ULL(POWER_DOMAIN_INIT)) 2786 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ 2787 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2788 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2789 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2790 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2791 BIT_ULL(POWER_DOMAIN_INIT)) 2792 2793 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2794 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2795 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2796 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2797 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2798 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2799 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2800 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2801 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2802 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2803 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2804 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 
2805 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2806 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2807 BIT_ULL(POWER_DOMAIN_VGA) | \ 2808 BIT_ULL(POWER_DOMAIN_INIT)) 2809 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ 2810 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2811 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2812 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2813 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2814 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2815 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ 2816 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2817 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2818 BIT_ULL(POWER_DOMAIN_INIT)) 2819 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ 2820 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2821 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2822 BIT_ULL(POWER_DOMAIN_INIT)) 2823 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ 2824 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2825 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2826 BIT_ULL(POWER_DOMAIN_INIT)) 2827 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2828 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2829 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2830 BIT_ULL(POWER_DOMAIN_INIT)) 2831 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2832 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2833 BIT_ULL(POWER_DOMAIN_INIT)) 2834 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2835 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2836 BIT_ULL(POWER_DOMAIN_INIT)) 2837 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2838 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2839 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2840 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2841 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2842 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2843 BIT_ULL(POWER_DOMAIN_INIT)) 2844 2845 /* 2846 * ICL PW_0/PG_0 domains (HW/DMC control): 2847 * - PCI 2848 * - clocks except port PLL 2849 * - central power except FBC 2850 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers 2851 * ICL PW_1/PG_1 domains (HW/DMC control): 2852 * - DBUF function 2853 * - PIPE_A and its planes, except VGA 2854 * - transcoder EDP + PSR 2855 * - transcoder DSI 2856 * - DDI_A 2857 * - FBC 2858 */ 2859 #define ICL_PW_4_POWER_DOMAINS ( \ 2860 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2861 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2862 BIT_ULL(POWER_DOMAIN_INIT)) 2863 /* VDSC/joining */ 2864 #define ICL_PW_3_POWER_DOMAINS ( \ 2865 ICL_PW_4_POWER_DOMAINS | \ 2866 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2867 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2868 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2869 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2870 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2871 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2872 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2873 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2874 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2875 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2876 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2877 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2878 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2879 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2880 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2881 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ 2882 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2883 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2884 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2885 BIT_ULL(POWER_DOMAIN_VGA) | \ 2886 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2887 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2888 BIT_ULL(POWER_DOMAIN_INIT)) 2889 /* 2890 * - transcoder WD 2891 * - KVMR (HW control) 2892 */ 2893 #define ICL_PW_2_POWER_DOMAINS ( \ 2894 ICL_PW_3_POWER_DOMAINS | \ 2895 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2896 BIT_ULL(POWER_DOMAIN_INIT)) 2897 /* 2898 * - KVMR (HW control) 2899 */ 2900 #define 
ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2901 ICL_PW_2_POWER_DOMAINS | \ 2902 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2903 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2904 BIT_ULL(POWER_DOMAIN_DC_OFF) | \ 2905 BIT_ULL(POWER_DOMAIN_INIT)) 2906 2907 #define ICL_DDI_IO_A_POWER_DOMAINS ( \ 2908 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2909 #define ICL_DDI_IO_B_POWER_DOMAINS ( \ 2910 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2911 #define ICL_DDI_IO_C_POWER_DOMAINS ( \ 2912 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2913 #define ICL_DDI_IO_D_POWER_DOMAINS ( \ 2914 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2915 #define ICL_DDI_IO_E_POWER_DOMAINS ( \ 2916 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2917 #define ICL_DDI_IO_F_POWER_DOMAINS ( \ 2918 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2919 2920 #define ICL_AUX_A_IO_POWER_DOMAINS ( \ 2921 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2922 BIT_ULL(POWER_DOMAIN_AUX_A)) 2923 #define ICL_AUX_B_IO_POWER_DOMAINS ( \ 2924 BIT_ULL(POWER_DOMAIN_AUX_B)) 2925 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ 2926 BIT_ULL(POWER_DOMAIN_AUX_C)) 2927 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ 2928 BIT_ULL(POWER_DOMAIN_AUX_D)) 2929 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ 2930 BIT_ULL(POWER_DOMAIN_AUX_E)) 2931 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ 2932 BIT_ULL(POWER_DOMAIN_AUX_F)) 2933 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ 2934 BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) 2935 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ 2936 BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) 2937 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ 2938 BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) 2939 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ 2940 BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) 2941 2942 #define TGL_PW_5_POWER_DOMAINS ( \ 2943 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 2944 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 2945 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 2946 BIT_ULL(POWER_DOMAIN_INIT)) 2947 2948 #define TGL_PW_4_POWER_DOMAINS ( \ 2949 TGL_PW_5_POWER_DOMAINS | \ 2950 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2951 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2952 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2953 BIT_ULL(POWER_DOMAIN_INIT)) 2954 2955 #define TGL_PW_3_POWER_DOMAINS ( \ 2956 TGL_PW_4_POWER_DOMAINS | \ 2957 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2958 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2959 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2960 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 2961 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 2962 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ 2963 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ 2964 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \ 2965 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \ 2966 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 2967 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 2968 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 2969 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 2970 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ 2971 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ 2972 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 2973 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 2974 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 2975 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 2976 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ 2977 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ 2978 BIT_ULL(POWER_DOMAIN_VGA) | \ 2979 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2980 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2981 BIT_ULL(POWER_DOMAIN_INIT)) 2982 2983 #define TGL_PW_2_POWER_DOMAINS ( \ 2984 TGL_PW_3_POWER_DOMAINS | \ 2985 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2986 BIT_ULL(POWER_DOMAIN_INIT)) 2987 2988 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2989 TGL_PW_3_POWER_DOMAINS | \ 2990 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2991 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2992 
BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2993 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2994 BIT_ULL(POWER_DOMAIN_INIT)) 2995 2996 #define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) 2997 #define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) 2998 #define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) 2999 #define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) 3000 #define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5) 3001 #define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6) 3002 3003 #define TGL_AUX_A_IO_POWER_DOMAINS ( \ 3004 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 3005 BIT_ULL(POWER_DOMAIN_AUX_A)) 3006 #define TGL_AUX_B_IO_POWER_DOMAINS ( \ 3007 BIT_ULL(POWER_DOMAIN_AUX_B)) 3008 #define TGL_AUX_C_IO_POWER_DOMAINS ( \ 3009 BIT_ULL(POWER_DOMAIN_AUX_C)) 3010 3011 #define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) 3012 #define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) 3013 #define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) 3014 #define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) 3015 #define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5) 3016 #define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6) 3017 3018 #define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) 3019 #define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) 3020 #define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) 3021 #define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) 3022 #define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5) 3023 #define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6) 3024 3025 #define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ 3026 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3027 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3028 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 3029 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 3030 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ 3031 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ 3032 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 3033 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 3034 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 3035 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 3036 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ 3037 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ 3038 BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) 3039 3040 #define RKL_PW_4_POWER_DOMAINS ( \ 3041 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 3042 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 3043 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 3044 BIT_ULL(POWER_DOMAIN_INIT)) 3045 3046 #define RKL_PW_3_POWER_DOMAINS ( \ 3047 RKL_PW_4_POWER_DOMAINS | \ 3048 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 3049 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 3050 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 3051 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 3052 BIT_ULL(POWER_DOMAIN_VGA) | \ 3053 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 3054 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 3055 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 3056 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3057 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3058 BIT_ULL(POWER_DOMAIN_INIT)) 3059 3060 /* 3061 * There is no PW_2/PG_2 on RKL. 
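* (so, unlike the platforms above, no RKL_PW_2_POWER_DOMAINS mask is
* defined and RKL_DISPLAY_DC_OFF_POWER_DOMAINS below builds directly on
* RKL_PW_3_POWER_DOMAINS).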
3062 * 3063 * RKL PW_1/PG_1 domains (under HW/DMC control): 3064 * - DBUF function (note: registers are in PW0) 3065 * - PIPE_A and its planes and VDSC/joining, except VGA 3066 * - transcoder A 3067 * - DDI_A and DDI_B 3068 * - FBC 3069 * 3070 * RKL PW_0/PG_0 domains (under HW/DMC control): 3071 * - PCI 3072 * - clocks except port PLL 3073 * - shared functions: 3074 * * interrupts except pipe interrupts 3075 * * MBus except PIPE_MBUS_DBOX_CTL 3076 * * DBUF registers 3077 * - central power except FBC 3078 * - top-level GTC (DDI-level GTC is in the well associated with the DDI) 3079 */ 3080 3081 #define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3082 RKL_PW_3_POWER_DOMAINS | \ 3083 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3084 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3085 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3086 BIT_ULL(POWER_DOMAIN_INIT)) 3087 3088 /* 3089 * From DG1 onwards the audio MMIO/VERBS registers lie in the PG0 power well. 3090 */ 3091 #define DG1_PW_3_POWER_DOMAINS ( \ 3092 TGL_PW_4_POWER_DOMAINS | \ 3093 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 3094 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 3095 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 3096 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 3097 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 3098 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3099 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3100 BIT_ULL(POWER_DOMAIN_VGA) | \ 3101 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 3102 BIT_ULL(POWER_DOMAIN_INIT)) 3103 3104 #define DG1_PW_2_POWER_DOMAINS ( \ 3105 DG1_PW_3_POWER_DOMAINS | \ 3106 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 3107 BIT_ULL(POWER_DOMAIN_INIT)) 3108 3109 #define DG1_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3110 DG1_PW_3_POWER_DOMAINS | \ 3111 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 3112 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3113 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3114 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3115 BIT_ULL(POWER_DOMAIN_INIT)) 3116 3117 /* 3118 * XE_LPD Power Domains 3119 * 3120 * Previous platforms required that PG(n-1) be enabled before PG(n). That 3121 * dependency chain turns into a dependency tree on XE_LPD: 3122 * 3123 * PG0 3124 * | 3125 * --PG1-- 3126 * / \ 3127 * PGA --PG2-- 3128 * / | \ 3129 * PGB PGC PGD 3130 * 3131 * Power wells must be enabled from top to bottom and disabled from bottom 3132 * to top. This allows pipes to be power gated independently.
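*
* For example, lighting up pipe D means enabling PG0 -> PG1 -> PG2 -> PGD,
* while pipe A only needs PG0 -> PG1 -> PGA. The get/put paths above walk
* the matching wells in forward respectively reverse order, which is what
* makes the independent gating work.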
3133 */ 3134 3135 #define XELPD_PW_D_POWER_DOMAINS ( \ 3136 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 3137 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 3138 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 3139 BIT_ULL(POWER_DOMAIN_INIT)) 3140 3141 #define XELPD_PW_C_POWER_DOMAINS ( \ 3142 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 3143 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 3144 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 3145 BIT_ULL(POWER_DOMAIN_INIT)) 3146 3147 #define XELPD_PW_B_POWER_DOMAINS ( \ 3148 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 3149 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 3150 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 3151 BIT_ULL(POWER_DOMAIN_INIT)) 3152 3153 #define XELPD_PW_A_POWER_DOMAINS ( \ 3154 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 3155 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 3156 BIT_ULL(POWER_DOMAIN_INIT)) 3157 3158 #define XELPD_PW_2_POWER_DOMAINS ( \ 3159 XELPD_PW_B_POWER_DOMAINS | \ 3160 XELPD_PW_C_POWER_DOMAINS | \ 3161 XELPD_PW_D_POWER_DOMAINS | \ 3162 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 3163 BIT_ULL(POWER_DOMAIN_VGA) | \ 3164 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 3165 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \ 3166 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \ 3167 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 3168 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 3169 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ 3170 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ 3171 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 3172 BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \ 3173 BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \ 3174 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3175 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3176 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 3177 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 3178 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 3179 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 3180 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 3181 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 3182 BIT_ULL(POWER_DOMAIN_INIT)) 3183 3184 /* 3185 * XELPD PW_1/PG_1 domains (under HW/DMC control): 3186 * - DBUF function (registers are in PW0) 3187 * - Transcoder A 3188 * - DDI_A and DDI_B 3189 * 3190 * XELPD PW_0/PG_0 domains (under HW/DMC control): 3191 * - PCI 3192 * - Clocks except port PLL 3193 * - Shared functions: 3194 * * interrupts except pipe interrupts 3195 * * MBus except PIPE_MBUS_DBOX_CTL 3196 * * DBUF registers 3197 * - Central power except FBC 3198 * - Top-level GTC (DDI-level GTC is in the well associated with the DDI) 3199 */ 3200 3201 #define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3202 XELPD_PW_2_POWER_DOMAINS | \ 3203 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 3204 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3205 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3206 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3207 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 3208 BIT_ULL(POWER_DOMAIN_INIT)) 3209 3210 #define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) 3211 #define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) 3212 #define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) 3213 #define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) 3214 #define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) 3215 #define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) 3216 3217 #define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) 3218 #define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) 3219 #define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) 3220 #define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) 3221 3222 #define
XELPD_DDI_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD) 3223 #define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD) 3224 #define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) 3225 #define XELPD_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) 3226 #define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) 3227 #define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) 3228 3229 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 3230 .sync_hw = i9xx_power_well_sync_hw_noop, 3231 .enable = i9xx_always_on_power_well_noop, 3232 .disable = i9xx_always_on_power_well_noop, 3233 .is_enabled = i9xx_always_on_power_well_enabled, 3234 }; 3235 3236 static const struct i915_power_well_ops chv_pipe_power_well_ops = { 3237 .sync_hw = chv_pipe_power_well_sync_hw, 3238 .enable = chv_pipe_power_well_enable, 3239 .disable = chv_pipe_power_well_disable, 3240 .is_enabled = chv_pipe_power_well_enabled, 3241 }; 3242 3243 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { 3244 .sync_hw = i9xx_power_well_sync_hw_noop, 3245 .enable = chv_dpio_cmn_power_well_enable, 3246 .disable = chv_dpio_cmn_power_well_disable, 3247 .is_enabled = vlv_power_well_enabled, 3248 }; 3249 3250 static const struct i915_power_well_desc i9xx_always_on_power_well[] = { 3251 { 3252 .name = "always-on", 3253 .always_on = true, 3254 .domains = POWER_DOMAIN_MASK, 3255 .ops = &i9xx_always_on_power_well_ops, 3256 .id = DISP_PW_ID_NONE, 3257 }, 3258 }; 3259 3260 static const struct i915_power_well_ops i830_pipes_power_well_ops = { 3261 .sync_hw = i830_pipes_power_well_sync_hw, 3262 .enable = i830_pipes_power_well_enable, 3263 .disable = i830_pipes_power_well_disable, 3264 .is_enabled = i830_pipes_power_well_enabled, 3265 }; 3266 3267 static const struct i915_power_well_desc i830_power_wells[] = { 3268 { 3269 .name = "always-on", 3270 .always_on = true, 3271 .domains = POWER_DOMAIN_MASK, 3272 .ops = &i9xx_always_on_power_well_ops, 3273 .id = DISP_PW_ID_NONE, 3274 }, 3275 { 3276 .name = "pipes", 3277 .domains = I830_PIPES_POWER_DOMAINS, 3278 .ops = &i830_pipes_power_well_ops, 3279 .id = DISP_PW_ID_NONE, 3280 }, 3281 }; 3282 3283 static const struct i915_power_well_ops hsw_power_well_ops = { 3284 .sync_hw = hsw_power_well_sync_hw, 3285 .enable = hsw_power_well_enable, 3286 .disable = hsw_power_well_disable, 3287 .is_enabled = hsw_power_well_enabled, 3288 }; 3289 3290 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { 3291 .sync_hw = i9xx_power_well_sync_hw_noop, 3292 .enable = gen9_dc_off_power_well_enable, 3293 .disable = gen9_dc_off_power_well_disable, 3294 .is_enabled = gen9_dc_off_power_well_enabled, 3295 }; 3296 3297 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { 3298 .sync_hw = i9xx_power_well_sync_hw_noop, 3299 .enable = bxt_dpio_cmn_power_well_enable, 3300 .disable = bxt_dpio_cmn_power_well_disable, 3301 .is_enabled = bxt_dpio_cmn_power_well_enabled, 3302 }; 3303 3304 static const struct i915_power_well_regs hsw_power_well_regs = { 3305 .bios = HSW_PWR_WELL_CTL1, 3306 .driver = HSW_PWR_WELL_CTL2, 3307 .kvmr = HSW_PWR_WELL_CTL3, 3308 .debug = HSW_PWR_WELL_CTL4, 3309 }; 3310 3311 static const struct i915_power_well_desc hsw_power_wells[] = { 3312 { 3313 .name = "always-on", 3314 .always_on = true, 3315 .domains = POWER_DOMAIN_MASK, 3316 .ops = &i9xx_always_on_power_well_ops, 3317 .id = DISP_PW_ID_NONE, 3318 }, 3319 { 3320 .name = 
"display", 3321 .domains = HSW_DISPLAY_POWER_DOMAINS, 3322 .ops = &hsw_power_well_ops, 3323 .id = HSW_DISP_PW_GLOBAL, 3324 { 3325 .hsw.regs = &hsw_power_well_regs, 3326 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3327 .hsw.has_vga = true, 3328 }, 3329 }, 3330 }; 3331 3332 static const struct i915_power_well_desc bdw_power_wells[] = { 3333 { 3334 .name = "always-on", 3335 .always_on = true, 3336 .domains = POWER_DOMAIN_MASK, 3337 .ops = &i9xx_always_on_power_well_ops, 3338 .id = DISP_PW_ID_NONE, 3339 }, 3340 { 3341 .name = "display", 3342 .domains = BDW_DISPLAY_POWER_DOMAINS, 3343 .ops = &hsw_power_well_ops, 3344 .id = HSW_DISP_PW_GLOBAL, 3345 { 3346 .hsw.regs = &hsw_power_well_regs, 3347 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3348 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3349 .hsw.has_vga = true, 3350 }, 3351 }, 3352 }; 3353 3354 static const struct i915_power_well_ops vlv_display_power_well_ops = { 3355 .sync_hw = i9xx_power_well_sync_hw_noop, 3356 .enable = vlv_display_power_well_enable, 3357 .disable = vlv_display_power_well_disable, 3358 .is_enabled = vlv_power_well_enabled, 3359 }; 3360 3361 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { 3362 .sync_hw = i9xx_power_well_sync_hw_noop, 3363 .enable = vlv_dpio_cmn_power_well_enable, 3364 .disable = vlv_dpio_cmn_power_well_disable, 3365 .is_enabled = vlv_power_well_enabled, 3366 }; 3367 3368 static const struct i915_power_well_ops vlv_dpio_power_well_ops = { 3369 .sync_hw = i9xx_power_well_sync_hw_noop, 3370 .enable = vlv_power_well_enable, 3371 .disable = vlv_power_well_disable, 3372 .is_enabled = vlv_power_well_enabled, 3373 }; 3374 3375 static const struct i915_power_well_desc vlv_power_wells[] = { 3376 { 3377 .name = "always-on", 3378 .always_on = true, 3379 .domains = POWER_DOMAIN_MASK, 3380 .ops = &i9xx_always_on_power_well_ops, 3381 .id = DISP_PW_ID_NONE, 3382 }, 3383 { 3384 .name = "display", 3385 .domains = VLV_DISPLAY_POWER_DOMAINS, 3386 .ops = &vlv_display_power_well_ops, 3387 .id = VLV_DISP_PW_DISP2D, 3388 { 3389 .vlv.idx = PUNIT_PWGT_IDX_DISP2D, 3390 }, 3391 }, 3392 { 3393 .name = "dpio-tx-b-01", 3394 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3395 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3396 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3397 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3398 .ops = &vlv_dpio_power_well_ops, 3399 .id = DISP_PW_ID_NONE, 3400 { 3401 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, 3402 }, 3403 }, 3404 { 3405 .name = "dpio-tx-b-23", 3406 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3407 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3408 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3409 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3410 .ops = &vlv_dpio_power_well_ops, 3411 .id = DISP_PW_ID_NONE, 3412 { 3413 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, 3414 }, 3415 }, 3416 { 3417 .name = "dpio-tx-c-01", 3418 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3419 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3420 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3421 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3422 .ops = &vlv_dpio_power_well_ops, 3423 .id = DISP_PW_ID_NONE, 3424 { 3425 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, 3426 }, 3427 }, 3428 { 3429 .name = "dpio-tx-c-23", 3430 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3431 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3432 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3433 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3434 .ops = &vlv_dpio_power_well_ops, 3435 .id = DISP_PW_ID_NONE, 3436 { 3437 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, 3438 }, 3439 }, 3440 { 3441 .name = 
"dpio-common", 3442 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, 3443 .ops = &vlv_dpio_cmn_power_well_ops, 3444 .id = VLV_DISP_PW_DPIO_CMN_BC, 3445 { 3446 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 3447 }, 3448 }, 3449 }; 3450 3451 static const struct i915_power_well_desc chv_power_wells[] = { 3452 { 3453 .name = "always-on", 3454 .always_on = true, 3455 .domains = POWER_DOMAIN_MASK, 3456 .ops = &i9xx_always_on_power_well_ops, 3457 .id = DISP_PW_ID_NONE, 3458 }, 3459 { 3460 .name = "display", 3461 /* 3462 * Pipe A power well is the new disp2d well. Pipe B and C 3463 * power wells don't actually exist. Pipe A power well is 3464 * required for any pipe to work. 3465 */ 3466 .domains = CHV_DISPLAY_POWER_DOMAINS, 3467 .ops = &chv_pipe_power_well_ops, 3468 .id = DISP_PW_ID_NONE, 3469 }, 3470 { 3471 .name = "dpio-common-bc", 3472 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, 3473 .ops = &chv_dpio_cmn_power_well_ops, 3474 .id = VLV_DISP_PW_DPIO_CMN_BC, 3475 { 3476 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 3477 }, 3478 }, 3479 { 3480 .name = "dpio-common-d", 3481 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, 3482 .ops = &chv_dpio_cmn_power_well_ops, 3483 .id = CHV_DISP_PW_DPIO_CMN_D, 3484 { 3485 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, 3486 }, 3487 }, 3488 }; 3489 3490 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 3491 enum i915_power_well_id power_well_id) 3492 { 3493 struct i915_power_well *power_well; 3494 bool ret; 3495 3496 power_well = lookup_power_well(dev_priv, power_well_id); 3497 ret = power_well->desc->ops->is_enabled(dev_priv, power_well); 3498 3499 return ret; 3500 } 3501 3502 static const struct i915_power_well_desc skl_power_wells[] = { 3503 { 3504 .name = "always-on", 3505 .always_on = true, 3506 .domains = POWER_DOMAIN_MASK, 3507 .ops = &i9xx_always_on_power_well_ops, 3508 .id = DISP_PW_ID_NONE, 3509 }, 3510 { 3511 .name = "power well 1", 3512 /* Handled by the DMC firmware */ 3513 .always_on = true, 3514 .domains = 0, 3515 .ops = &hsw_power_well_ops, 3516 .id = SKL_DISP_PW_1, 3517 { 3518 .hsw.regs = &hsw_power_well_regs, 3519 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3520 .hsw.has_fuses = true, 3521 }, 3522 }, 3523 { 3524 .name = "MISC IO power well", 3525 /* Handled by the DMC firmware */ 3526 .always_on = true, 3527 .domains = 0, 3528 .ops = &hsw_power_well_ops, 3529 .id = SKL_DISP_PW_MISC_IO, 3530 { 3531 .hsw.regs = &hsw_power_well_regs, 3532 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, 3533 }, 3534 }, 3535 { 3536 .name = "DC off", 3537 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, 3538 .ops = &gen9_dc_off_power_well_ops, 3539 .id = SKL_DISP_DC_OFF, 3540 }, 3541 { 3542 .name = "power well 2", 3543 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3544 .ops = &hsw_power_well_ops, 3545 .id = SKL_DISP_PW_2, 3546 { 3547 .hsw.regs = &hsw_power_well_regs, 3548 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3549 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3550 .hsw.has_vga = true, 3551 .hsw.has_fuses = true, 3552 }, 3553 }, 3554 { 3555 .name = "DDI A/E IO power well", 3556 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, 3557 .ops = &hsw_power_well_ops, 3558 .id = DISP_PW_ID_NONE, 3559 { 3560 .hsw.regs = &hsw_power_well_regs, 3561 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, 3562 }, 3563 }, 3564 { 3565 .name = "DDI B IO power well", 3566 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3567 .ops = &hsw_power_well_ops, 3568 .id = DISP_PW_ID_NONE, 3569 { 3570 .hsw.regs = &hsw_power_well_regs, 3571 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3572 }, 3573 }, 3574 { 3575 .name = "DDI C IO power well", 3576 .domains = 
SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3577 .ops = &hsw_power_well_ops, 3578 .id = DISP_PW_ID_NONE, 3579 { 3580 .hsw.regs = &hsw_power_well_regs, 3581 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3582 }, 3583 }, 3584 { 3585 .name = "DDI D IO power well", 3586 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, 3587 .ops = &hsw_power_well_ops, 3588 .id = DISP_PW_ID_NONE, 3589 { 3590 .hsw.regs = &hsw_power_well_regs, 3591 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 3592 }, 3593 }, 3594 }; 3595 3596 static const struct i915_power_well_desc bxt_power_wells[] = { 3597 { 3598 .name = "always-on", 3599 .always_on = true, 3600 .domains = POWER_DOMAIN_MASK, 3601 .ops = &i9xx_always_on_power_well_ops, 3602 .id = DISP_PW_ID_NONE, 3603 }, 3604 { 3605 .name = "power well 1", 3606 /* Handled by the DMC firmware */ 3607 .always_on = true, 3608 .domains = 0, 3609 .ops = &hsw_power_well_ops, 3610 .id = SKL_DISP_PW_1, 3611 { 3612 .hsw.regs = &hsw_power_well_regs, 3613 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3614 .hsw.has_fuses = true, 3615 }, 3616 }, 3617 { 3618 .name = "DC off", 3619 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, 3620 .ops = &gen9_dc_off_power_well_ops, 3621 .id = SKL_DISP_DC_OFF, 3622 }, 3623 { 3624 .name = "power well 2", 3625 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3626 .ops = &hsw_power_well_ops, 3627 .id = SKL_DISP_PW_2, 3628 { 3629 .hsw.regs = &hsw_power_well_regs, 3630 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3631 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3632 .hsw.has_vga = true, 3633 .hsw.has_fuses = true, 3634 }, 3635 }, 3636 { 3637 .name = "dpio-common-a", 3638 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, 3639 .ops = &bxt_dpio_cmn_power_well_ops, 3640 .id = BXT_DISP_PW_DPIO_CMN_A, 3641 { 3642 .bxt.phy = DPIO_PHY1, 3643 }, 3644 }, 3645 { 3646 .name = "dpio-common-bc", 3647 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, 3648 .ops = &bxt_dpio_cmn_power_well_ops, 3649 .id = VLV_DISP_PW_DPIO_CMN_BC, 3650 { 3651 .bxt.phy = DPIO_PHY0, 3652 }, 3653 }, 3654 }; 3655 3656 static const struct i915_power_well_desc glk_power_wells[] = { 3657 { 3658 .name = "always-on", 3659 .always_on = true, 3660 .domains = POWER_DOMAIN_MASK, 3661 .ops = &i9xx_always_on_power_well_ops, 3662 .id = DISP_PW_ID_NONE, 3663 }, 3664 { 3665 .name = "power well 1", 3666 /* Handled by the DMC firmware */ 3667 .always_on = true, 3668 .domains = 0, 3669 .ops = &hsw_power_well_ops, 3670 .id = SKL_DISP_PW_1, 3671 { 3672 .hsw.regs = &hsw_power_well_regs, 3673 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3674 .hsw.has_fuses = true, 3675 }, 3676 }, 3677 { 3678 .name = "DC off", 3679 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, 3680 .ops = &gen9_dc_off_power_well_ops, 3681 .id = SKL_DISP_DC_OFF, 3682 }, 3683 { 3684 .name = "power well 2", 3685 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3686 .ops = &hsw_power_well_ops, 3687 .id = SKL_DISP_PW_2, 3688 { 3689 .hsw.regs = &hsw_power_well_regs, 3690 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3691 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3692 .hsw.has_vga = true, 3693 .hsw.has_fuses = true, 3694 }, 3695 }, 3696 { 3697 .name = "dpio-common-a", 3698 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, 3699 .ops = &bxt_dpio_cmn_power_well_ops, 3700 .id = BXT_DISP_PW_DPIO_CMN_A, 3701 { 3702 .bxt.phy = DPIO_PHY1, 3703 }, 3704 }, 3705 { 3706 .name = "dpio-common-b", 3707 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, 3708 .ops = &bxt_dpio_cmn_power_well_ops, 3709 .id = VLV_DISP_PW_DPIO_CMN_BC, 3710 { 3711 .bxt.phy = DPIO_PHY0, 3712 }, 3713 }, 3714 { 3715 .name = "dpio-common-c", 3716 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, 3717 .ops = 
&bxt_dpio_cmn_power_well_ops, 3718 .id = GLK_DISP_PW_DPIO_CMN_C, 3719 { 3720 .bxt.phy = DPIO_PHY2, 3721 }, 3722 }, 3723 { 3724 .name = "AUX A", 3725 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, 3726 .ops = &hsw_power_well_ops, 3727 .id = DISP_PW_ID_NONE, 3728 { 3729 .hsw.regs = &hsw_power_well_regs, 3730 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3731 }, 3732 }, 3733 { 3734 .name = "AUX B", 3735 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, 3736 .ops = &hsw_power_well_ops, 3737 .id = DISP_PW_ID_NONE, 3738 { 3739 .hsw.regs = &hsw_power_well_regs, 3740 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3741 }, 3742 }, 3743 { 3744 .name = "AUX C", 3745 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, 3746 .ops = &hsw_power_well_ops, 3747 .id = DISP_PW_ID_NONE, 3748 { 3749 .hsw.regs = &hsw_power_well_regs, 3750 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3751 }, 3752 }, 3753 { 3754 .name = "DDI A IO power well", 3755 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, 3756 .ops = &hsw_power_well_ops, 3757 .id = DISP_PW_ID_NONE, 3758 { 3759 .hsw.regs = &hsw_power_well_regs, 3760 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3761 }, 3762 }, 3763 { 3764 .name = "DDI B IO power well", 3765 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3766 .ops = &hsw_power_well_ops, 3767 .id = DISP_PW_ID_NONE, 3768 { 3769 .hsw.regs = &hsw_power_well_regs, 3770 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3771 }, 3772 }, 3773 { 3774 .name = "DDI C IO power well", 3775 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3776 .ops = &hsw_power_well_ops, 3777 .id = DISP_PW_ID_NONE, 3778 { 3779 .hsw.regs = &hsw_power_well_regs, 3780 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3781 }, 3782 }, 3783 }; 3784 3785 static const struct i915_power_well_ops icl_aux_power_well_ops = { 3786 .sync_hw = hsw_power_well_sync_hw, 3787 .enable = icl_aux_power_well_enable, 3788 .disable = icl_aux_power_well_disable, 3789 .is_enabled = hsw_power_well_enabled, 3790 }; 3791 3792 static const struct i915_power_well_regs icl_aux_power_well_regs = { 3793 .bios = ICL_PWR_WELL_CTL_AUX1, 3794 .driver = ICL_PWR_WELL_CTL_AUX2, 3795 .debug = ICL_PWR_WELL_CTL_AUX4, 3796 }; 3797 3798 static const struct i915_power_well_regs icl_ddi_power_well_regs = { 3799 .bios = ICL_PWR_WELL_CTL_DDI1, 3800 .driver = ICL_PWR_WELL_CTL_DDI2, 3801 .debug = ICL_PWR_WELL_CTL_DDI4, 3802 }; 3803 3804 static const struct i915_power_well_desc icl_power_wells[] = { 3805 { 3806 .name = "always-on", 3807 .always_on = true, 3808 .domains = POWER_DOMAIN_MASK, 3809 .ops = &i9xx_always_on_power_well_ops, 3810 .id = DISP_PW_ID_NONE, 3811 }, 3812 { 3813 .name = "power well 1", 3814 /* Handled by the DMC firmware */ 3815 .always_on = true, 3816 .domains = 0, 3817 .ops = &hsw_power_well_ops, 3818 .id = SKL_DISP_PW_1, 3819 { 3820 .hsw.regs = &hsw_power_well_regs, 3821 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3822 .hsw.has_fuses = true, 3823 }, 3824 }, 3825 { 3826 .name = "DC off", 3827 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, 3828 .ops = &gen9_dc_off_power_well_ops, 3829 .id = SKL_DISP_DC_OFF, 3830 }, 3831 { 3832 .name = "power well 2", 3833 .domains = ICL_PW_2_POWER_DOMAINS, 3834 .ops = &hsw_power_well_ops, 3835 .id = SKL_DISP_PW_2, 3836 { 3837 .hsw.regs = &hsw_power_well_regs, 3838 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3839 .hsw.has_fuses = true, 3840 }, 3841 }, 3842 { 3843 .name = "power well 3", 3844 .domains = ICL_PW_3_POWER_DOMAINS, 3845 .ops = &hsw_power_well_ops, 3846 .id = ICL_DISP_PW_3, 3847 { 3848 .hsw.regs = &hsw_power_well_regs, 3849 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3850 .hsw.irq_pipe_mask = BIT(PIPE_B), 3851 .hsw.has_vga = true, 3852 .hsw.has_fuses = true, 3853 }, 3854 }, 
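	/*
	 * Note: the DDI IO and AUX wells below use the dedicated
	 * icl_ddi_power_well_regs and icl_aux_power_well_regs control
	 * register sets defined above instead of the main
	 * HSW_PWR_WELL_CTL set. The TBT-mode AUX wells additionally set
	 * .hsw.is_tc_tbt, presumably so the AUX enable/disable hooks can
	 * tell Thunderbolt ports apart from TypeC DP-alt ports.
	 */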
3855 { 3856 .name = "DDI A IO", 3857 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3858 .ops = &hsw_power_well_ops, 3859 .id = DISP_PW_ID_NONE, 3860 { 3861 .hsw.regs = &icl_ddi_power_well_regs, 3862 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3863 }, 3864 }, 3865 { 3866 .name = "DDI B IO", 3867 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3868 .ops = &hsw_power_well_ops, 3869 .id = DISP_PW_ID_NONE, 3870 { 3871 .hsw.regs = &icl_ddi_power_well_regs, 3872 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3873 }, 3874 }, 3875 { 3876 .name = "DDI C IO", 3877 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3878 .ops = &hsw_power_well_ops, 3879 .id = DISP_PW_ID_NONE, 3880 { 3881 .hsw.regs = &icl_ddi_power_well_regs, 3882 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3883 }, 3884 }, 3885 { 3886 .name = "DDI D IO", 3887 .domains = ICL_DDI_IO_D_POWER_DOMAINS, 3888 .ops = &hsw_power_well_ops, 3889 .id = DISP_PW_ID_NONE, 3890 { 3891 .hsw.regs = &icl_ddi_power_well_regs, 3892 .hsw.idx = ICL_PW_CTL_IDX_DDI_D, 3893 }, 3894 }, 3895 { 3896 .name = "DDI E IO", 3897 .domains = ICL_DDI_IO_E_POWER_DOMAINS, 3898 .ops = &hsw_power_well_ops, 3899 .id = DISP_PW_ID_NONE, 3900 { 3901 .hsw.regs = &icl_ddi_power_well_regs, 3902 .hsw.idx = ICL_PW_CTL_IDX_DDI_E, 3903 }, 3904 }, 3905 { 3906 .name = "DDI F IO", 3907 .domains = ICL_DDI_IO_F_POWER_DOMAINS, 3908 .ops = &hsw_power_well_ops, 3909 .id = DISP_PW_ID_NONE, 3910 { 3911 .hsw.regs = &icl_ddi_power_well_regs, 3912 .hsw.idx = ICL_PW_CTL_IDX_DDI_F, 3913 }, 3914 }, 3915 { 3916 .name = "AUX A", 3917 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 3918 .ops = &icl_aux_power_well_ops, 3919 .id = DISP_PW_ID_NONE, 3920 { 3921 .hsw.regs = &icl_aux_power_well_regs, 3922 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3923 }, 3924 }, 3925 { 3926 .name = "AUX B", 3927 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 3928 .ops = &icl_aux_power_well_ops, 3929 .id = DISP_PW_ID_NONE, 3930 { 3931 .hsw.regs = &icl_aux_power_well_regs, 3932 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3933 }, 3934 }, 3935 { 3936 .name = "AUX C TC1", 3937 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, 3938 .ops = &icl_aux_power_well_ops, 3939 .id = DISP_PW_ID_NONE, 3940 { 3941 .hsw.regs = &icl_aux_power_well_regs, 3942 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3943 .hsw.is_tc_tbt = false, 3944 }, 3945 }, 3946 { 3947 .name = "AUX D TC2", 3948 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, 3949 .ops = &icl_aux_power_well_ops, 3950 .id = DISP_PW_ID_NONE, 3951 { 3952 .hsw.regs = &icl_aux_power_well_regs, 3953 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 3954 .hsw.is_tc_tbt = false, 3955 }, 3956 }, 3957 { 3958 .name = "AUX E TC3", 3959 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, 3960 .ops = &icl_aux_power_well_ops, 3961 .id = DISP_PW_ID_NONE, 3962 { 3963 .hsw.regs = &icl_aux_power_well_regs, 3964 .hsw.idx = ICL_PW_CTL_IDX_AUX_E, 3965 .hsw.is_tc_tbt = false, 3966 }, 3967 }, 3968 { 3969 .name = "AUX F TC4", 3970 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, 3971 .ops = &icl_aux_power_well_ops, 3972 .id = DISP_PW_ID_NONE, 3973 { 3974 .hsw.regs = &icl_aux_power_well_regs, 3975 .hsw.idx = ICL_PW_CTL_IDX_AUX_F, 3976 .hsw.is_tc_tbt = false, 3977 }, 3978 }, 3979 { 3980 .name = "AUX C TBT1", 3981 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, 3982 .ops = &icl_aux_power_well_ops, 3983 .id = DISP_PW_ID_NONE, 3984 { 3985 .hsw.regs = &icl_aux_power_well_regs, 3986 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, 3987 .hsw.is_tc_tbt = true, 3988 }, 3989 }, 3990 { 3991 .name = "AUX D TBT2", 3992 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, 3993 .ops = &icl_aux_power_well_ops, 3994 .id = DISP_PW_ID_NONE, 3995 { 3996 .hsw.regs = &icl_aux_power_well_regs, 3997 .hsw.idx = 
ICL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX E TBT3",
		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX F TBT4",
		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};

static void
tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
{
	u8 tries = 0;
	int ret;

	while (1) {
		u32 low_val;
		u32 high_val = 0;

		if (block)
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
		else
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;

		/*
		 * The spec states that we should time out the request
		 * after 200us, but snb_pcode_read() below times out
		 * after 500us.
		 */
		ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val);
		if (ret == 0) {
			if (block &&
			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
				ret = -EIO;
			else
				break;
		}

		if (++tries == 3)
			break;

		msleep(1);
	}

	if (ret)
		drm_err(&i915->drm, "TC cold %sblock failed\n",
			block ? "" : "un");
	else
		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
			    block ? "" : "un");
}

static void
tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
				  struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, true);
}

static void
tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
				   struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, false);
}

static void
tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
				   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		tgl_tc_cold_off_power_well_enable(i915, power_well);
	else
		tgl_tc_cold_off_power_well_disable(i915, power_well);
}

static bool
tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	/*
	 * Not the correct implementation, but there is no way to just
	 * read the state back from PCODE, so return the refcount to
	 * avoid state mismatch errors.
	 */
	return power_well->count;
}

static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
	.enable = tgl_tc_cold_off_power_well_enable,
	.disable = tgl_tc_cold_off_power_well_disable,
	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};

static const struct i915_power_well_desc tgl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = TGL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = TGL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		}
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		}
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		}
	},
	{
		.name = "DDI IO TC1",
		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4210 }, 4211 }, 4212 { 4213 .name = "DDI IO TC2", 4214 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4215 .ops = &hsw_power_well_ops, 4216 .id = DISP_PW_ID_NONE, 4217 { 4218 .hsw.regs = &icl_ddi_power_well_regs, 4219 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4220 }, 4221 }, 4222 { 4223 .name = "DDI IO TC3", 4224 .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, 4225 .ops = &hsw_power_well_ops, 4226 .id = DISP_PW_ID_NONE, 4227 { 4228 .hsw.regs = &icl_ddi_power_well_regs, 4229 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 4230 }, 4231 }, 4232 { 4233 .name = "DDI IO TC4", 4234 .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, 4235 .ops = &hsw_power_well_ops, 4236 .id = DISP_PW_ID_NONE, 4237 { 4238 .hsw.regs = &icl_ddi_power_well_regs, 4239 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 4240 }, 4241 }, 4242 { 4243 .name = "DDI IO TC5", 4244 .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, 4245 .ops = &hsw_power_well_ops, 4246 .id = DISP_PW_ID_NONE, 4247 { 4248 .hsw.regs = &icl_ddi_power_well_regs, 4249 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, 4250 }, 4251 }, 4252 { 4253 .name = "DDI IO TC6", 4254 .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, 4255 .ops = &hsw_power_well_ops, 4256 .id = DISP_PW_ID_NONE, 4257 { 4258 .hsw.regs = &icl_ddi_power_well_regs, 4259 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, 4260 }, 4261 }, 4262 { 4263 .name = "TC cold off", 4264 .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, 4265 .ops = &tgl_tc_cold_off_ops, 4266 .id = TGL_DISP_PW_TC_COLD_OFF, 4267 }, 4268 { 4269 .name = "AUX A", 4270 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 4271 .ops = &icl_aux_power_well_ops, 4272 .id = DISP_PW_ID_NONE, 4273 { 4274 .hsw.regs = &icl_aux_power_well_regs, 4275 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4276 }, 4277 }, 4278 { 4279 .name = "AUX B", 4280 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 4281 .ops = &icl_aux_power_well_ops, 4282 .id = DISP_PW_ID_NONE, 4283 { 4284 .hsw.regs = &icl_aux_power_well_regs, 4285 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4286 }, 4287 }, 4288 { 4289 .name = "AUX C", 4290 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 4291 .ops = &icl_aux_power_well_ops, 4292 .id = DISP_PW_ID_NONE, 4293 { 4294 .hsw.regs = &icl_aux_power_well_regs, 4295 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 4296 }, 4297 }, 4298 { 4299 .name = "AUX USBC1", 4300 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4301 .ops = &icl_aux_power_well_ops, 4302 .id = DISP_PW_ID_NONE, 4303 { 4304 .hsw.regs = &icl_aux_power_well_regs, 4305 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4306 .hsw.is_tc_tbt = false, 4307 }, 4308 }, 4309 { 4310 .name = "AUX USBC2", 4311 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4312 .ops = &icl_aux_power_well_ops, 4313 .id = DISP_PW_ID_NONE, 4314 { 4315 .hsw.regs = &icl_aux_power_well_regs, 4316 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4317 .hsw.is_tc_tbt = false, 4318 }, 4319 }, 4320 { 4321 .name = "AUX USBC3", 4322 .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS, 4323 .ops = &icl_aux_power_well_ops, 4324 .id = DISP_PW_ID_NONE, 4325 { 4326 .hsw.regs = &icl_aux_power_well_regs, 4327 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 4328 .hsw.is_tc_tbt = false, 4329 }, 4330 }, 4331 { 4332 .name = "AUX USBC4", 4333 .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS, 4334 .ops = &icl_aux_power_well_ops, 4335 .id = DISP_PW_ID_NONE, 4336 { 4337 .hsw.regs = &icl_aux_power_well_regs, 4338 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 4339 .hsw.is_tc_tbt = false, 4340 }, 4341 }, 4342 { 4343 .name = "AUX USBC5", 4344 .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS, 4345 .ops = &icl_aux_power_well_ops, 4346 .id = DISP_PW_ID_NONE, 4347 { 4348 .hsw.regs = &icl_aux_power_well_regs, 4349 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, 4350 .hsw.is_tc_tbt = 
false, 4351 }, 4352 }, 4353 { 4354 .name = "AUX USBC6", 4355 .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS, 4356 .ops = &icl_aux_power_well_ops, 4357 .id = DISP_PW_ID_NONE, 4358 { 4359 .hsw.regs = &icl_aux_power_well_regs, 4360 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, 4361 .hsw.is_tc_tbt = false, 4362 }, 4363 }, 4364 { 4365 .name = "AUX TBT1", 4366 .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS, 4367 .ops = &icl_aux_power_well_ops, 4368 .id = DISP_PW_ID_NONE, 4369 { 4370 .hsw.regs = &icl_aux_power_well_regs, 4371 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 4372 .hsw.is_tc_tbt = true, 4373 }, 4374 }, 4375 { 4376 .name = "AUX TBT2", 4377 .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS, 4378 .ops = &icl_aux_power_well_ops, 4379 .id = DISP_PW_ID_NONE, 4380 { 4381 .hsw.regs = &icl_aux_power_well_regs, 4382 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 4383 .hsw.is_tc_tbt = true, 4384 }, 4385 }, 4386 { 4387 .name = "AUX TBT3", 4388 .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS, 4389 .ops = &icl_aux_power_well_ops, 4390 .id = DISP_PW_ID_NONE, 4391 { 4392 .hsw.regs = &icl_aux_power_well_regs, 4393 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 4394 .hsw.is_tc_tbt = true, 4395 }, 4396 }, 4397 { 4398 .name = "AUX TBT4", 4399 .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS, 4400 .ops = &icl_aux_power_well_ops, 4401 .id = DISP_PW_ID_NONE, 4402 { 4403 .hsw.regs = &icl_aux_power_well_regs, 4404 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 4405 .hsw.is_tc_tbt = true, 4406 }, 4407 }, 4408 { 4409 .name = "AUX TBT5", 4410 .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS, 4411 .ops = &icl_aux_power_well_ops, 4412 .id = DISP_PW_ID_NONE, 4413 { 4414 .hsw.regs = &icl_aux_power_well_regs, 4415 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, 4416 .hsw.is_tc_tbt = true, 4417 }, 4418 }, 4419 { 4420 .name = "AUX TBT6", 4421 .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS, 4422 .ops = &icl_aux_power_well_ops, 4423 .id = DISP_PW_ID_NONE, 4424 { 4425 .hsw.regs = &icl_aux_power_well_regs, 4426 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, 4427 .hsw.is_tc_tbt = true, 4428 }, 4429 }, 4430 { 4431 .name = "power well 4", 4432 .domains = TGL_PW_4_POWER_DOMAINS, 4433 .ops = &hsw_power_well_ops, 4434 .id = DISP_PW_ID_NONE, 4435 { 4436 .hsw.regs = &hsw_power_well_regs, 4437 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4438 .hsw.has_fuses = true, 4439 .hsw.irq_pipe_mask = BIT(PIPE_C), 4440 } 4441 }, 4442 { 4443 .name = "power well 5", 4444 .domains = TGL_PW_5_POWER_DOMAINS, 4445 .ops = &hsw_power_well_ops, 4446 .id = DISP_PW_ID_NONE, 4447 { 4448 .hsw.regs = &hsw_power_well_regs, 4449 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4450 .hsw.has_fuses = true, 4451 .hsw.irq_pipe_mask = BIT(PIPE_D), 4452 }, 4453 }, 4454 }; 4455 4456 static const struct i915_power_well_desc rkl_power_wells[] = { 4457 { 4458 .name = "always-on", 4459 .always_on = true, 4460 .domains = POWER_DOMAIN_MASK, 4461 .ops = &i9xx_always_on_power_well_ops, 4462 .id = DISP_PW_ID_NONE, 4463 }, 4464 { 4465 .name = "power well 1", 4466 /* Handled by the DMC firmware */ 4467 .always_on = true, 4468 .domains = 0, 4469 .ops = &hsw_power_well_ops, 4470 .id = SKL_DISP_PW_1, 4471 { 4472 .hsw.regs = &hsw_power_well_regs, 4473 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4474 .hsw.has_fuses = true, 4475 }, 4476 }, 4477 { 4478 .name = "DC off", 4479 .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS, 4480 .ops = &gen9_dc_off_power_well_ops, 4481 .id = SKL_DISP_DC_OFF, 4482 }, 4483 { 4484 .name = "power well 3", 4485 .domains = RKL_PW_3_POWER_DOMAINS, 4486 .ops = &hsw_power_well_ops, 4487 .id = ICL_DISP_PW_3, 4488 { 4489 .hsw.regs = &hsw_power_well_regs, 4490 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4491 .hsw.irq_pipe_mask = BIT(PIPE_B), 
4492 .hsw.has_vga = true, 4493 .hsw.has_fuses = true, 4494 }, 4495 }, 4496 { 4497 .name = "power well 4", 4498 .domains = RKL_PW_4_POWER_DOMAINS, 4499 .ops = &hsw_power_well_ops, 4500 .id = DISP_PW_ID_NONE, 4501 { 4502 .hsw.regs = &hsw_power_well_regs, 4503 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4504 .hsw.has_fuses = true, 4505 .hsw.irq_pipe_mask = BIT(PIPE_C), 4506 } 4507 }, 4508 { 4509 .name = "DDI A IO", 4510 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4511 .ops = &hsw_power_well_ops, 4512 .id = DISP_PW_ID_NONE, 4513 { 4514 .hsw.regs = &icl_ddi_power_well_regs, 4515 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4516 } 4517 }, 4518 { 4519 .name = "DDI B IO", 4520 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4521 .ops = &hsw_power_well_ops, 4522 .id = DISP_PW_ID_NONE, 4523 { 4524 .hsw.regs = &icl_ddi_power_well_regs, 4525 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4526 } 4527 }, 4528 { 4529 .name = "DDI IO TC1", 4530 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4531 .ops = &hsw_power_well_ops, 4532 .id = DISP_PW_ID_NONE, 4533 { 4534 .hsw.regs = &icl_ddi_power_well_regs, 4535 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4536 }, 4537 }, 4538 { 4539 .name = "DDI IO TC2", 4540 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4541 .ops = &hsw_power_well_ops, 4542 .id = DISP_PW_ID_NONE, 4543 { 4544 .hsw.regs = &icl_ddi_power_well_regs, 4545 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4546 }, 4547 }, 4548 { 4549 .name = "AUX A", 4550 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 4551 .ops = &icl_aux_power_well_ops, 4552 .id = DISP_PW_ID_NONE, 4553 { 4554 .hsw.regs = &icl_aux_power_well_regs, 4555 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4556 }, 4557 }, 4558 { 4559 .name = "AUX B", 4560 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 4561 .ops = &icl_aux_power_well_ops, 4562 .id = DISP_PW_ID_NONE, 4563 { 4564 .hsw.regs = &icl_aux_power_well_regs, 4565 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4566 }, 4567 }, 4568 { 4569 .name = "AUX USBC1", 4570 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4571 .ops = &icl_aux_power_well_ops, 4572 .id = DISP_PW_ID_NONE, 4573 { 4574 .hsw.regs = &icl_aux_power_well_regs, 4575 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4576 }, 4577 }, 4578 { 4579 .name = "AUX USBC2", 4580 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4581 .ops = &icl_aux_power_well_ops, 4582 .id = DISP_PW_ID_NONE, 4583 { 4584 .hsw.regs = &icl_aux_power_well_regs, 4585 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4586 }, 4587 }, 4588 }; 4589 4590 static const struct i915_power_well_desc dg1_power_wells[] = { 4591 { 4592 .name = "always-on", 4593 .always_on = true, 4594 .domains = POWER_DOMAIN_MASK, 4595 .ops = &i9xx_always_on_power_well_ops, 4596 .id = DISP_PW_ID_NONE, 4597 }, 4598 { 4599 .name = "power well 1", 4600 /* Handled by the DMC firmware */ 4601 .always_on = true, 4602 .domains = 0, 4603 .ops = &hsw_power_well_ops, 4604 .id = SKL_DISP_PW_1, 4605 { 4606 .hsw.regs = &hsw_power_well_regs, 4607 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4608 .hsw.has_fuses = true, 4609 }, 4610 }, 4611 { 4612 .name = "DC off", 4613 .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS, 4614 .ops = &gen9_dc_off_power_well_ops, 4615 .id = SKL_DISP_DC_OFF, 4616 }, 4617 { 4618 .name = "power well 2", 4619 .domains = DG1_PW_2_POWER_DOMAINS, 4620 .ops = &hsw_power_well_ops, 4621 .id = SKL_DISP_PW_2, 4622 { 4623 .hsw.regs = &hsw_power_well_regs, 4624 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 4625 .hsw.has_fuses = true, 4626 }, 4627 }, 4628 { 4629 .name = "power well 3", 4630 .domains = DG1_PW_3_POWER_DOMAINS, 4631 .ops = &hsw_power_well_ops, 4632 .id = ICL_DISP_PW_3, 4633 { 4634 .hsw.regs = &hsw_power_well_regs, 4635 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4636 .hsw.irq_pipe_mask = 
BIT(PIPE_B), 4637 .hsw.has_vga = true, 4638 .hsw.has_fuses = true, 4639 }, 4640 }, 4641 { 4642 .name = "DDI A IO", 4643 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4644 .ops = &hsw_power_well_ops, 4645 .id = DISP_PW_ID_NONE, 4646 { 4647 .hsw.regs = &icl_ddi_power_well_regs, 4648 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4649 } 4650 }, 4651 { 4652 .name = "DDI B IO", 4653 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4654 .ops = &hsw_power_well_ops, 4655 .id = DISP_PW_ID_NONE, 4656 { 4657 .hsw.regs = &icl_ddi_power_well_regs, 4658 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4659 } 4660 }, 4661 { 4662 .name = "DDI IO TC1", 4663 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4664 .ops = &hsw_power_well_ops, 4665 .id = DISP_PW_ID_NONE, 4666 { 4667 .hsw.regs = &icl_ddi_power_well_regs, 4668 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4669 }, 4670 }, 4671 { 4672 .name = "DDI IO TC2", 4673 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4674 .ops = &hsw_power_well_ops, 4675 .id = DISP_PW_ID_NONE, 4676 { 4677 .hsw.regs = &icl_ddi_power_well_regs, 4678 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4679 }, 4680 }, 4681 { 4682 .name = "AUX A", 4683 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 4684 .ops = &icl_aux_power_well_ops, 4685 .id = DISP_PW_ID_NONE, 4686 { 4687 .hsw.regs = &icl_aux_power_well_regs, 4688 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4689 }, 4690 }, 4691 { 4692 .name = "AUX B", 4693 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 4694 .ops = &icl_aux_power_well_ops, 4695 .id = DISP_PW_ID_NONE, 4696 { 4697 .hsw.regs = &icl_aux_power_well_regs, 4698 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4699 }, 4700 }, 4701 { 4702 .name = "AUX USBC1", 4703 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4704 .ops = &icl_aux_power_well_ops, 4705 .id = DISP_PW_ID_NONE, 4706 { 4707 .hsw.regs = &icl_aux_power_well_regs, 4708 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4709 .hsw.is_tc_tbt = false, 4710 }, 4711 }, 4712 { 4713 .name = "AUX USBC2", 4714 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4715 .ops = &icl_aux_power_well_ops, 4716 .id = DISP_PW_ID_NONE, 4717 { 4718 .hsw.regs = &icl_aux_power_well_regs, 4719 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4720 .hsw.is_tc_tbt = false, 4721 }, 4722 }, 4723 { 4724 .name = "power well 4", 4725 .domains = TGL_PW_4_POWER_DOMAINS, 4726 .ops = &hsw_power_well_ops, 4727 .id = DISP_PW_ID_NONE, 4728 { 4729 .hsw.regs = &hsw_power_well_regs, 4730 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4731 .hsw.has_fuses = true, 4732 .hsw.irq_pipe_mask = BIT(PIPE_C), 4733 } 4734 }, 4735 { 4736 .name = "power well 5", 4737 .domains = TGL_PW_5_POWER_DOMAINS, 4738 .ops = &hsw_power_well_ops, 4739 .id = DISP_PW_ID_NONE, 4740 { 4741 .hsw.regs = &hsw_power_well_regs, 4742 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4743 .hsw.has_fuses = true, 4744 .hsw.irq_pipe_mask = BIT(PIPE_D), 4745 }, 4746 }, 4747 }; 4748 4749 static const struct i915_power_well_desc xelpd_power_wells[] = { 4750 { 4751 .name = "always-on", 4752 .always_on = true, 4753 .domains = POWER_DOMAIN_MASK, 4754 .ops = &i9xx_always_on_power_well_ops, 4755 .id = DISP_PW_ID_NONE, 4756 }, 4757 { 4758 .name = "power well 1", 4759 /* Handled by the DMC firmware */ 4760 .always_on = true, 4761 .domains = 0, 4762 .ops = &hsw_power_well_ops, 4763 .id = SKL_DISP_PW_1, 4764 { 4765 .hsw.regs = &hsw_power_well_regs, 4766 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4767 .hsw.has_fuses = true, 4768 }, 4769 }, 4770 { 4771 .name = "DC off", 4772 .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS, 4773 .ops = &gen9_dc_off_power_well_ops, 4774 .id = SKL_DISP_DC_OFF, 4775 }, 4776 { 4777 .name = "power well 2", 4778 .domains = XELPD_PW_2_POWER_DOMAINS, 4779 .ops = &hsw_power_well_ops, 4780 .id = 
SKL_DISP_PW_2, 4781 { 4782 .hsw.regs = &hsw_power_well_regs, 4783 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 4784 .hsw.has_vga = true, 4785 .hsw.has_fuses = true, 4786 }, 4787 }, 4788 { 4789 .name = "power well A", 4790 .domains = XELPD_PW_A_POWER_DOMAINS, 4791 .ops = &hsw_power_well_ops, 4792 .id = DISP_PW_ID_NONE, 4793 { 4794 .hsw.regs = &hsw_power_well_regs, 4795 .hsw.idx = XELPD_PW_CTL_IDX_PW_A, 4796 .hsw.irq_pipe_mask = BIT(PIPE_A), 4797 .hsw.has_fuses = true, 4798 }, 4799 }, 4800 { 4801 .name = "power well B", 4802 .domains = XELPD_PW_B_POWER_DOMAINS, 4803 .ops = &hsw_power_well_ops, 4804 .id = DISP_PW_ID_NONE, 4805 { 4806 .hsw.regs = &hsw_power_well_regs, 4807 .hsw.idx = XELPD_PW_CTL_IDX_PW_B, 4808 .hsw.irq_pipe_mask = BIT(PIPE_B), 4809 .hsw.has_fuses = true, 4810 }, 4811 }, 4812 { 4813 .name = "power well C", 4814 .domains = XELPD_PW_C_POWER_DOMAINS, 4815 .ops = &hsw_power_well_ops, 4816 .id = DISP_PW_ID_NONE, 4817 { 4818 .hsw.regs = &hsw_power_well_regs, 4819 .hsw.idx = XELPD_PW_CTL_IDX_PW_C, 4820 .hsw.irq_pipe_mask = BIT(PIPE_C), 4821 .hsw.has_fuses = true, 4822 }, 4823 }, 4824 { 4825 .name = "power well D", 4826 .domains = XELPD_PW_D_POWER_DOMAINS, 4827 .ops = &hsw_power_well_ops, 4828 .id = DISP_PW_ID_NONE, 4829 { 4830 .hsw.regs = &hsw_power_well_regs, 4831 .hsw.idx = XELPD_PW_CTL_IDX_PW_D, 4832 .hsw.irq_pipe_mask = BIT(PIPE_D), 4833 .hsw.has_fuses = true, 4834 }, 4835 }, 4836 { 4837 .name = "DDI A IO", 4838 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4839 .ops = &hsw_power_well_ops, 4840 .id = DISP_PW_ID_NONE, 4841 { 4842 .hsw.regs = &icl_ddi_power_well_regs, 4843 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4844 } 4845 }, 4846 { 4847 .name = "DDI B IO", 4848 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4849 .ops = &hsw_power_well_ops, 4850 .id = DISP_PW_ID_NONE, 4851 { 4852 .hsw.regs = &icl_ddi_power_well_regs, 4853 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4854 } 4855 }, 4856 { 4857 .name = "DDI C IO", 4858 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 4859 .ops = &hsw_power_well_ops, 4860 .id = DISP_PW_ID_NONE, 4861 { 4862 .hsw.regs = &icl_ddi_power_well_regs, 4863 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 4864 } 4865 }, 4866 { 4867 .name = "DDI IO D_XELPD", 4868 .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS, 4869 .ops = &hsw_power_well_ops, 4870 .id = DISP_PW_ID_NONE, 4871 { 4872 .hsw.regs = &icl_ddi_power_well_regs, 4873 .hsw.idx = XELPD_PW_CTL_IDX_DDI_D, 4874 } 4875 }, 4876 { 4877 .name = "DDI IO E_XELPD", 4878 .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS, 4879 .ops = &hsw_power_well_ops, 4880 .id = DISP_PW_ID_NONE, 4881 { 4882 .hsw.regs = &icl_ddi_power_well_regs, 4883 .hsw.idx = XELPD_PW_CTL_IDX_DDI_E, 4884 } 4885 }, 4886 { 4887 .name = "DDI IO TC1", 4888 .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS, 4889 .ops = &hsw_power_well_ops, 4890 .id = DISP_PW_ID_NONE, 4891 { 4892 .hsw.regs = &icl_ddi_power_well_regs, 4893 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4894 } 4895 }, 4896 { 4897 .name = "DDI IO TC2", 4898 .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS, 4899 .ops = &hsw_power_well_ops, 4900 .id = DISP_PW_ID_NONE, 4901 { 4902 .hsw.regs = &icl_ddi_power_well_regs, 4903 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4904 } 4905 }, 4906 { 4907 .name = "DDI IO TC3", 4908 .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS, 4909 .ops = &hsw_power_well_ops, 4910 .id = DISP_PW_ID_NONE, 4911 { 4912 .hsw.regs = &icl_ddi_power_well_regs, 4913 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 4914 } 4915 }, 4916 { 4917 .name = "DDI IO TC4", 4918 .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS, 4919 .ops = &hsw_power_well_ops, 4920 .id = DISP_PW_ID_NONE, 4921 { 4922 .hsw.regs = 
&icl_ddi_power_well_regs, 4923 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 4924 } 4925 }, 4926 { 4927 .name = "AUX A", 4928 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 4929 .ops = &icl_aux_power_well_ops, 4930 .id = DISP_PW_ID_NONE, 4931 { 4932 .hsw.regs = &icl_aux_power_well_regs, 4933 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4934 .hsw.fixed_enable_delay = 600, 4935 }, 4936 }, 4937 { 4938 .name = "AUX B", 4939 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 4940 .ops = &icl_aux_power_well_ops, 4941 .id = DISP_PW_ID_NONE, 4942 { 4943 .hsw.regs = &icl_aux_power_well_regs, 4944 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4945 .hsw.fixed_enable_delay = 600, 4946 }, 4947 }, 4948 { 4949 .name = "AUX C", 4950 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 4951 .ops = &icl_aux_power_well_ops, 4952 .id = DISP_PW_ID_NONE, 4953 { 4954 .hsw.regs = &icl_aux_power_well_regs, 4955 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 4956 .hsw.fixed_enable_delay = 600, 4957 }, 4958 }, 4959 { 4960 .name = "AUX D_XELPD", 4961 .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS, 4962 .ops = &icl_aux_power_well_ops, 4963 .id = DISP_PW_ID_NONE, 4964 { 4965 .hsw.regs = &icl_aux_power_well_regs, 4966 .hsw.idx = XELPD_PW_CTL_IDX_AUX_D, 4967 .hsw.fixed_enable_delay = 600, 4968 }, 4969 }, 4970 { 4971 .name = "AUX E_XELPD", 4972 .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS, 4973 .ops = &icl_aux_power_well_ops, 4974 .id = DISP_PW_ID_NONE, 4975 { 4976 .hsw.regs = &icl_aux_power_well_regs, 4977 .hsw.idx = XELPD_PW_CTL_IDX_AUX_E, 4978 }, 4979 }, 4980 { 4981 .name = "AUX USBC1", 4982 .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS, 4983 .ops = &icl_aux_power_well_ops, 4984 .id = DISP_PW_ID_NONE, 4985 { 4986 .hsw.regs = &icl_aux_power_well_regs, 4987 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4988 .hsw.fixed_enable_delay = 600, 4989 }, 4990 }, 4991 { 4992 .name = "AUX USBC2", 4993 .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS, 4994 .ops = &icl_aux_power_well_ops, 4995 .id = DISP_PW_ID_NONE, 4996 { 4997 .hsw.regs = &icl_aux_power_well_regs, 4998 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4999 }, 5000 }, 5001 { 5002 .name = "AUX USBC3", 5003 .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS, 5004 .ops = &icl_aux_power_well_ops, 5005 .id = DISP_PW_ID_NONE, 5006 { 5007 .hsw.regs = &icl_aux_power_well_regs, 5008 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 5009 }, 5010 }, 5011 { 5012 .name = "AUX USBC4", 5013 .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS, 5014 .ops = &icl_aux_power_well_ops, 5015 .id = DISP_PW_ID_NONE, 5016 { 5017 .hsw.regs = &icl_aux_power_well_regs, 5018 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 5019 }, 5020 }, 5021 { 5022 .name = "AUX TBT1", 5023 .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS, 5024 .ops = &icl_aux_power_well_ops, 5025 .id = DISP_PW_ID_NONE, 5026 { 5027 .hsw.regs = &icl_aux_power_well_regs, 5028 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 5029 .hsw.is_tc_tbt = true, 5030 }, 5031 }, 5032 { 5033 .name = "AUX TBT2", 5034 .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS, 5035 .ops = &icl_aux_power_well_ops, 5036 .id = DISP_PW_ID_NONE, 5037 { 5038 .hsw.regs = &icl_aux_power_well_regs, 5039 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 5040 .hsw.is_tc_tbt = true, 5041 }, 5042 }, 5043 { 5044 .name = "AUX TBT3", 5045 .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS, 5046 .ops = &icl_aux_power_well_ops, 5047 .id = DISP_PW_ID_NONE, 5048 { 5049 .hsw.regs = &icl_aux_power_well_regs, 5050 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 5051 .hsw.is_tc_tbt = true, 5052 }, 5053 }, 5054 { 5055 .name = "AUX TBT4", 5056 .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS, 5057 .ops = &icl_aux_power_well_ops, 5058 .id = DISP_PW_ID_NONE, 5059 { 5060 .hsw.regs = 
&icl_aux_power_well_regs, 5061 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 5062 .hsw.is_tc_tbt = true, 5063 }, 5064 }, 5065 }; 5066 5067 static int 5068 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 5069 int disable_power_well) 5070 { 5071 if (disable_power_well >= 0) 5072 return !!disable_power_well; 5073 5074 return 1; 5075 } 5076 5077 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, 5078 int enable_dc) 5079 { 5080 u32 mask; 5081 int requested_dc; 5082 int max_dc; 5083 5084 if (!HAS_DISPLAY(dev_priv)) 5085 return 0; 5086 5087 if (IS_DG1(dev_priv)) 5088 max_dc = 3; 5089 else if (DISPLAY_VER(dev_priv) >= 12) 5090 max_dc = 4; 5091 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 5092 max_dc = 1; 5093 else if (DISPLAY_VER(dev_priv) >= 9) 5094 max_dc = 2; 5095 else 5096 max_dc = 0; 5097 5098 /* 5099 * DC9 has a separate HW flow from the rest of the DC states, 5100 * not depending on the DMC firmware. It's needed by system 5101 * suspend/resume, so allow it unconditionally. 5102 */ 5103 mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || 5104 DISPLAY_VER(dev_priv) >= 11 ? 5105 DC_STATE_EN_DC9 : 0; 5106 5107 if (!dev_priv->params.disable_power_well) 5108 max_dc = 0; 5109 5110 if (enable_dc >= 0 && enable_dc <= max_dc) { 5111 requested_dc = enable_dc; 5112 } else if (enable_dc == -1) { 5113 requested_dc = max_dc; 5114 } else if (enable_dc > max_dc && enable_dc <= 4) { 5115 drm_dbg_kms(&dev_priv->drm, 5116 "Adjusting requested max DC state (%d->%d)\n", 5117 enable_dc, max_dc); 5118 requested_dc = max_dc; 5119 } else { 5120 drm_err(&dev_priv->drm, 5121 "Unexpected value for enable_dc (%d)\n", enable_dc); 5122 requested_dc = max_dc; 5123 } 5124 5125 switch (requested_dc) { 5126 case 4: 5127 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; 5128 break; 5129 case 3: 5130 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; 5131 break; 5132 case 2: 5133 mask |= DC_STATE_EN_UPTO_DC6; 5134 break; 5135 case 1: 5136 mask |= DC_STATE_EN_UPTO_DC5; 5137 break; 5138 } 5139 5140 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask); 5141 5142 return mask; 5143 } 5144 5145 static int 5146 __set_power_wells(struct i915_power_domains *power_domains, 5147 const struct i915_power_well_desc *power_well_descs, 5148 int power_well_descs_sz, u64 skip_mask) 5149 { 5150 struct drm_i915_private *i915 = container_of(power_domains, 5151 struct drm_i915_private, 5152 power_domains); 5153 u64 power_well_ids = 0; 5154 int power_well_count = 0; 5155 int i, plt_idx = 0; 5156 5157 for (i = 0; i < power_well_descs_sz; i++) 5158 if (!(BIT_ULL(power_well_descs[i].id) & skip_mask)) 5159 power_well_count++; 5160 5161 power_domains->power_well_count = power_well_count; 5162 power_domains->power_wells = 5163 kcalloc(power_well_count, 5164 sizeof(*power_domains->power_wells), 5165 GFP_KERNEL); 5166 if (!power_domains->power_wells) 5167 return -ENOMEM; 5168 5169 for (i = 0; i < power_well_descs_sz; i++) { 5170 enum i915_power_well_id id = power_well_descs[i].id; 5171 5172 if (BIT_ULL(id) & skip_mask) 5173 continue; 5174 5175 power_domains->power_wells[plt_idx++].desc = 5176 &power_well_descs[i]; 5177 5178 if (id == DISP_PW_ID_NONE) 5179 continue; 5180 5181 drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8); 5182 drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id)); 5183 power_well_ids |= BIT_ULL(id); 5184 } 5185 5186 return 0; 5187 } 5188 5189 #define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \ 5190 __set_power_wells(power_domains, 
__power_well_descs, \ 5191 ARRAY_SIZE(__power_well_descs), skip_mask) 5192 5193 #define set_power_wells(power_domains, __power_well_descs) \ 5194 set_power_wells_mask(power_domains, __power_well_descs, 0) 5195 5196 /** 5197 * intel_power_domains_init - initializes the power domain structures 5198 * @dev_priv: i915 device instance 5199 * 5200 * Initializes the power domain structures for @dev_priv depending upon the 5201 * supported platform. 5202 */ 5203 int intel_power_domains_init(struct drm_i915_private *dev_priv) 5204 { 5205 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5206 int err; 5207 5208 dev_priv->params.disable_power_well = 5209 sanitize_disable_power_well_option(dev_priv, 5210 dev_priv->params.disable_power_well); 5211 dev_priv->dmc.allowed_dc_mask = 5212 get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); 5213 5214 dev_priv->dmc.target_dc_state = 5215 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 5216 5217 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); 5218 5219 mutex_init(&power_domains->lock); 5220 5221 INIT_DELAYED_WORK(&power_domains->async_put_work, 5222 intel_display_power_put_async_work); 5223 5224 /* 5225 * The enabling order will be from lower to higher indexed wells, 5226 * the disabling order is reversed. 5227 */ 5228 if (!HAS_DISPLAY(dev_priv)) { 5229 power_domains->power_well_count = 0; 5230 err = 0; 5231 } else if (DISPLAY_VER(dev_priv) >= 13) { 5232 err = set_power_wells(power_domains, xelpd_power_wells); 5233 } else if (IS_DG1(dev_priv)) { 5234 err = set_power_wells(power_domains, dg1_power_wells); 5235 } else if (IS_ALDERLAKE_S(dev_priv)) { 5236 err = set_power_wells_mask(power_domains, tgl_power_wells, 5237 BIT_ULL(TGL_DISP_PW_TC_COLD_OFF)); 5238 } else if (IS_ROCKETLAKE(dev_priv)) { 5239 err = set_power_wells(power_domains, rkl_power_wells); 5240 } else if (DISPLAY_VER(dev_priv) == 12) { 5241 err = set_power_wells(power_domains, tgl_power_wells); 5242 } else if (DISPLAY_VER(dev_priv) == 11) { 5243 err = set_power_wells(power_domains, icl_power_wells); 5244 } else if (IS_GEMINILAKE(dev_priv)) { 5245 err = set_power_wells(power_domains, glk_power_wells); 5246 } else if (IS_BROXTON(dev_priv)) { 5247 err = set_power_wells(power_domains, bxt_power_wells); 5248 } else if (DISPLAY_VER(dev_priv) == 9) { 5249 err = set_power_wells(power_domains, skl_power_wells); 5250 } else if (IS_CHERRYVIEW(dev_priv)) { 5251 err = set_power_wells(power_domains, chv_power_wells); 5252 } else if (IS_BROADWELL(dev_priv)) { 5253 err = set_power_wells(power_domains, bdw_power_wells); 5254 } else if (IS_HASWELL(dev_priv)) { 5255 err = set_power_wells(power_domains, hsw_power_wells); 5256 } else if (IS_VALLEYVIEW(dev_priv)) { 5257 err = set_power_wells(power_domains, vlv_power_wells); 5258 } else if (IS_I830(dev_priv)) { 5259 err = set_power_wells(power_domains, i830_power_wells); 5260 } else { 5261 err = set_power_wells(power_domains, i9xx_always_on_power_well); 5262 } 5263 5264 return err; 5265 } 5266 5267 /** 5268 * intel_power_domains_cleanup - clean up power domains resources 5269 * @dev_priv: i915 device instance 5270 * 5271 * Release any resources acquired by intel_power_domains_init() 5272 */ 5273 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) 5274 { 5275 kfree(dev_priv->power_domains.power_wells); 5276 } 5277 5278 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) 5279 { 5280 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5281 struct i915_power_well *power_well; 5282 5283 
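	/*
	 * Walk the wells under the power domains lock so the cached
	 * hw_enabled state cannot race with a concurrent enable/disable.
	 */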
mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->desc->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled =
			power_well->desc->ops->is_enabled(dev_priv, power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, enabledisable(enable));
}

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * This might run in parallel with gen9_dc_off_power_well_enable()
	 * being called from intel_dp_detect() for instance. Without the
	 * lock, that race would trigger an assertion:
	 * gen9_assert_dbuf_enabled() could preempt us after the registers
	 * were already updated but before dev_priv was.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
	 * Just power up at least one slice; we will figure out later
	 * which slices we have and which we need.
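	 * ORing in BIT(DBUF_S1) below guarantees that at least slice S1 is
	 * requested even when the mask read back from the hardware is empty.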
5348 */ 5349 gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | 5350 dev_priv->dbuf.enabled_slices); 5351 } 5352 5353 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) 5354 { 5355 gen9_dbuf_slices_update(dev_priv, 0); 5356 } 5357 5358 static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) 5359 { 5360 enum dbuf_slice slice; 5361 5362 if (IS_ALDERLAKE_P(dev_priv)) 5363 return; 5364 5365 for_each_dbuf_slice(dev_priv, slice) 5366 intel_de_rmw(dev_priv, DBUF_CTL_S(slice), 5367 DBUF_TRACKER_STATE_SERVICE_MASK, 5368 DBUF_TRACKER_STATE_SERVICE(8)); 5369 } 5370 5371 static void icl_mbus_init(struct drm_i915_private *dev_priv) 5372 { 5373 unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask; 5374 u32 mask, val, i; 5375 5376 if (IS_ALDERLAKE_P(dev_priv)) 5377 return; 5378 5379 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | 5380 MBUS_ABOX_BT_CREDIT_POOL2_MASK | 5381 MBUS_ABOX_B_CREDIT_MASK | 5382 MBUS_ABOX_BW_CREDIT_MASK; 5383 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 5384 MBUS_ABOX_BT_CREDIT_POOL2(16) | 5385 MBUS_ABOX_B_CREDIT(1) | 5386 MBUS_ABOX_BW_CREDIT(1); 5387 5388 /* 5389 * gen12 platforms that use abox1 and abox2 for pixel data reads still 5390 * expect us to program the abox_ctl0 register as well, even though 5391 * we don't have to program other instance-0 registers like BW_BUDDY. 5392 */ 5393 if (DISPLAY_VER(dev_priv) == 12) 5394 abox_regs |= BIT(0); 5395 5396 for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) 5397 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val); 5398 } 5399 5400 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 5401 { 5402 u32 val = intel_de_read(dev_priv, LCPLL_CTL); 5403 5404 /* 5405 * The LCPLL register should be turned on by the BIOS. For now 5406 * let's just check its state and print errors in case 5407 * something is wrong. Don't even try to turn it on. 
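	 * The three checks below cover what can be detected from LCPLL_CTL
	 * alone: CDCLK still sourced from FCLK, the PLL left disabled, or a
	 * reference other than the non-SSC one selected.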
5408 */ 5409 5410 if (val & LCPLL_CD_SOURCE_FCLK) 5411 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); 5412 5413 if (val & LCPLL_PLL_DISABLE) 5414 drm_err(&dev_priv->drm, "LCPLL is disabled\n"); 5415 5416 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 5417 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); 5418 } 5419 5420 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 5421 { 5422 struct drm_device *dev = &dev_priv->drm; 5423 struct intel_crtc *crtc; 5424 5425 for_each_intel_crtc(dev, crtc) 5426 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 5427 pipe_name(crtc->pipe)); 5428 5429 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), 5430 "Display power well on\n"); 5431 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, 5432 "SPLL enabled\n"); 5433 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 5434 "WRPLL1 enabled\n"); 5435 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 5436 "WRPLL2 enabled\n"); 5437 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, 5438 "Panel power on\n"); 5439 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 5440 "CPU PWM1 enabled\n"); 5441 if (IS_HASWELL(dev_priv)) 5442 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 5443 "CPU PWM2 enabled\n"); 5444 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 5445 "PCH PWM1 enabled\n"); 5446 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 5447 "Utility pin enabled\n"); 5448 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, 5449 "PCH GTC enabled\n"); 5450 5451 /* 5452 * In theory we can still leave IRQs enabled, as long as only the HPD 5453 * interrupts remain enabled. We used to check for that, but since it's 5454 * gen-specific and since we only disable LCPLL after we fully disable 5455 * the interrupts, the check below should be enough. 5456 */ 5457 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 5458 } 5459 5460 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 5461 { 5462 if (IS_HASWELL(dev_priv)) 5463 return intel_de_read(dev_priv, D_COMP_HSW); 5464 else 5465 return intel_de_read(dev_priv, D_COMP_BDW); 5466 } 5467 5468 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 5469 { 5470 if (IS_HASWELL(dev_priv)) { 5471 if (snb_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) 5472 drm_dbg_kms(&dev_priv->drm, 5473 "Failed to write to D_COMP\n"); 5474 } else { 5475 intel_de_write(dev_priv, D_COMP_BDW, val); 5476 intel_de_posting_read(dev_priv, D_COMP_BDW); 5477 } 5478 } 5479 5480 /* 5481 * This function implements pieces of two sequences from BSpec: 5482 * - Sequence for display software to disable LCPLL 5483 * - Sequence for display software to allow package C8+ 5484 * The steps implemented here are just the steps that actually touch the LCPLL 5485 * register. Callers should take care of disabling all the display engine 5486 * functions, doing the mode unset, fixing interrupts, etc. 
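 * In short, the sequence below: optionally redirects CDCLK to FCLK, disables
 * the PLL and waits for the lock bit to clear, disables D_COMP, and finally
 * (if requested) allows LCPLL power down.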
5487 */ 5488 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 5489 bool switch_to_fclk, bool allow_power_down) 5490 { 5491 u32 val; 5492 5493 assert_can_disable_lcpll(dev_priv); 5494 5495 val = intel_de_read(dev_priv, LCPLL_CTL); 5496 5497 if (switch_to_fclk) { 5498 val |= LCPLL_CD_SOURCE_FCLK; 5499 intel_de_write(dev_priv, LCPLL_CTL, val); 5500 5501 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & 5502 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 5503 drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); 5504 5505 val = intel_de_read(dev_priv, LCPLL_CTL); 5506 } 5507 5508 val |= LCPLL_PLL_DISABLE; 5509 intel_de_write(dev_priv, LCPLL_CTL, val); 5510 intel_de_posting_read(dev_priv, LCPLL_CTL); 5511 5512 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 5513 drm_err(&dev_priv->drm, "LCPLL still locked\n"); 5514 5515 val = hsw_read_dcomp(dev_priv); 5516 val |= D_COMP_COMP_DISABLE; 5517 hsw_write_dcomp(dev_priv, val); 5518 ndelay(100); 5519 5520 if (wait_for((hsw_read_dcomp(dev_priv) & 5521 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 5522 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); 5523 5524 if (allow_power_down) { 5525 val = intel_de_read(dev_priv, LCPLL_CTL); 5526 val |= LCPLL_POWER_DOWN_ALLOW; 5527 intel_de_write(dev_priv, LCPLL_CTL, val); 5528 intel_de_posting_read(dev_priv, LCPLL_CTL); 5529 } 5530 } 5531 5532 /* 5533 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 5534 * source. 5535 */ 5536 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 5537 { 5538 u32 val; 5539 5540 val = intel_de_read(dev_priv, LCPLL_CTL); 5541 5542 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 5543 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 5544 return; 5545 5546 /* 5547 * Make sure we're not on PC8 state before disabling PC8, otherwise 5548 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 5549 */ 5550 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 5551 5552 if (val & LCPLL_POWER_DOWN_ALLOW) { 5553 val &= ~LCPLL_POWER_DOWN_ALLOW; 5554 intel_de_write(dev_priv, LCPLL_CTL, val); 5555 intel_de_posting_read(dev_priv, LCPLL_CTL); 5556 } 5557 5558 val = hsw_read_dcomp(dev_priv); 5559 val |= D_COMP_COMP_FORCE; 5560 val &= ~D_COMP_COMP_DISABLE; 5561 hsw_write_dcomp(dev_priv, val); 5562 5563 val = intel_de_read(dev_priv, LCPLL_CTL); 5564 val &= ~LCPLL_PLL_DISABLE; 5565 intel_de_write(dev_priv, LCPLL_CTL, val); 5566 5567 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 5568 drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); 5569 5570 if (val & LCPLL_CD_SOURCE_FCLK) { 5571 val = intel_de_read(dev_priv, LCPLL_CTL); 5572 val &= ~LCPLL_CD_SOURCE_FCLK; 5573 intel_de_write(dev_priv, LCPLL_CTL, val); 5574 5575 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & 5576 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 5577 drm_err(&dev_priv->drm, 5578 "Switching back to LCPLL failed\n"); 5579 } 5580 5581 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 5582 5583 intel_update_cdclk(dev_priv); 5584 intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); 5585 } 5586 5587 /* 5588 * Package states C8 and deeper are really deep PC states that can only be 5589 * reached when all the devices on the system allow it, so even if the graphics 5590 * device allows PC8+, it doesn't mean the system will actually get to these 5591 * states. Our driver only allows PC8+ when going into runtime PM. 
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the
 * graphics device allows PC8+, it doesn't mean the system will actually get
 * to these states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL
 * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can
 * hard hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we
 * lose the state of some registers, so when we come back from PC8+ we need
 * to restore this state. We don't get into PC8+ if we're not in RC6, so we
 * don't need to take care of the registers kept by RC6. Notice that this
 * happens even if we don't put the device in PCI D3 state (which is what
 * currently happens because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" in the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}

static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}
}

static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits, val;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	val = intel_de_read(dev_priv, reg);

	if (enable)
		val |= reset_bits;
	else
		val &= ~reset_bits;

	intel_de_write(dev_priv, reg, val);
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);
}
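/*
 * skl_display_core_uninit() below mirrors skl_display_core_init() in
 * reverse: DC states, DBUF and CDCLK are torn down before the driver's
 * PG1 request is dropped.
 */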
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);
}

static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
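/*
 * BW_BUDDY configuration: tgl_bw_buddy_init() further below scans the page
 * mask tables that follow for an entry matching the detected DRAM type and
 * channel count (for example, 2-channel DDR4 maps to page_mask 0x1F on
 * TGL). The empty terminator entry ({}) ends the scan; if no entry matches,
 * the buddy logic is disabled instead.
 */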
struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};

static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
	int config, i;

	/* BW_BUDDY registers are not used on dgpu's beyond DG1 */
	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
		/* Wa_1409767108:tgl,dg1,adl-s */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
			if (DISPLAY_VER(dev_priv) == 12)
				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
					     BW_BUDDY_TLB_REQ_TIMER_MASK,
					     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}
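/*
 * icl_display_core_init() below performs the numbered init steps in order
 * (PCH reset handshake, combo PHYs, PG1, CDCLK, DBUF, MBUS, BW_BUDDY, PHY
 * calibration); icl_display_core_uninit() walks essentially the same steps
 * in reverse.
 */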
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 12)
		gen12_dbuf_slices_config(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (IS_DG2(dev_priv))
		intel_snps_phy_wait_for_calibration(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
	if (DISPLAY_VER(dev_priv) >= 12) {
		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
	}

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(dev_priv) >= 13)
		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}

static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Disable Combo PHYs. */
	intel_combo_phy_uninit(dev_priv);
}
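/*
 * Lane status convention used by chv_phy_control_init() below: the
 * DPLL_PORT{B,C,D}_READY_MASK fields are 4 bits per channel. A value of
 * 0xf (all lanes ready) is mapped to an all-clear powerdown mask with the
 * override left disabled; any other value enables the powerdown override
 * for that channel.
 */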
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}
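/*
 * The helper below checks a Punit subsystem power state register (the
 * callers pass PUNIT_REG_VEDSSPM0 or PUNIT_REG_ISPSSPM0) and reports
 * whether its SSC field indicates the power-gated state.
 */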
static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (DISPLAY_VER(i915) == 9) {
		skl_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/*
	 * Disable power well support if the user asked so: hold an extra
	 * INIT power domain reference so that the power wells are never
	 * powered down. The reference is dropped again in
	 * intel_power_domains_driver_remove() or on suspend.
	 */
	if (!i915->params.disable_power_well) {
		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
		i915->power_domains.disable_wakeref = intel_display_power_get(i915,
									      POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}
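/*
 * Driver-load ordering implied by the kernel-doc above and below (a
 * sketch, not a literal call site):
 *
 *	intel_power_domains_init_hw(i915, false);
 *	... display HW readout / state sanitization ...
 *	intel_power_domains_enable(i915);
 *	...
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_driver_remove(i915);
 */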
/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/**
 * intel_power_domains_sanitize_state - sanitize power domains state
 * @i915: i915 device instance
 *
 * Sanitize the power domains state during driver loading and system resume.
 * The function will disable all display power wells that BIOS has enabled
 * without a user for them (any user for a power well has taken a reference
 * on it by the time this function is called, after the state of all the
 * pipe, encoder, etc. HW resources have been sanitized).
 */
void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);

	for_each_power_well_reverse(i915, power_well) {
		if (power_well->desc->always_on || power_well->count ||
		    !power_well->desc->ops->is_enabled(i915, power_well))
			continue;

		drm_dbg_kms(&i915->drm,
			    "BIOS left unused %s power well enabled, disabling it\n",
			    power_well->desc->name);
		intel_power_well_disable(i915, power_well);
	}

	mutex_unlock(&power_domains->lock);
}
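/*
 * Note that intel_power_domains_sanitize_state() above only drops wells
 * nothing claimed during readout: wells that are always-on, still
 * refcounted or already disabled are skipped by the loop.
 */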
/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the on-demand enabling/disabling of the display power wells. Note
 * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
 * toggled only at specific points of the display modeset sequence, thus they
 * are not affected by the intel_power_domains_enable()/disable() calls. The
 * purpose of these functions is to keep the rest of the power wells enabled
 * until the end of display HW readout (which will acquire the power
 * references reflecting the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the on-demand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    intel_dmc_has_payload(i915)) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	if (DISPLAY_VER(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
		bxt_display_core_uninit(i915);
	else if (DISPLAY_VER(i915) == 9)
		skl_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}
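/*
 * Note the early return in intel_power_domains_suspend() above: for
 * suspend-to-idle on a DMC platform without DC9 support the display core
 * is left initialized and display_core_suspended stays false, so
 * intel_power_domains_resume() below only re-takes the INIT wakeref on
 * that path.
 */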
/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
		power_domains->init_wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg(&i915->drm, "%-25s %d\n",
			power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			drm_dbg(&i915->drm, "  %-23s %d\n",
				intel_display_power_domain_str(domain),
				power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify whether the reference count of each power well matches its HW
 * enabled state and the total refcount of the domains it belongs to. This
 * must be called after modeset HW state sanitization, which is responsible
 * for acquiring reference counts for any power wells in use and disabling
 * the ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			drm_err(&i915->drm,
				"power well %s state mismatch (refcount %d/enabled %d)",
				power_well->desc->name,
				power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			drm_err(&i915->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				power_well->desc->name, power_well->count,
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif

void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
			     SBCLK_RUN_REFCLK_DIS);
}
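/*
 * intel_display_power_resume_early() below is the mirror of
 * intel_display_power_suspend_late() above: it undoes DC9/PC8 and clears
 * SBCLK_RUN_REFCLK_DIS again for the same PCH range.
 */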
void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}

void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

void intel_display_power_resume(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		bxt_disable_dc9(i915);
		icl_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915)) {
			if (i915->dmc.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(i915);
			else if (i915->dmc.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(i915);
		}
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_disable_dc9(i915);
		bxt_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915) &&
		    (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}

void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);
}