1 /* SPDX-License-Identifier: MIT */ 2 /* 3 * Copyright © 2019 Intel Corporation 4 */ 5 6 #include "i915_drv.h" 7 #include "i915_irq.h" 8 #include "intel_cdclk.h" 9 #include "intel_combo_phy.h" 10 #include "intel_combo_phy_regs.h" 11 #include "intel_crt.h" 12 #include "intel_de.h" 13 #include "intel_display_power.h" 14 #include "intel_display_types.h" 15 #include "intel_dmc.h" 16 #include "intel_dpio_phy.h" 17 #include "intel_dpll.h" 18 #include "intel_hotplug.h" 19 #include "intel_pch_refclk.h" 20 #include "intel_pcode.h" 21 #include "intel_pm.h" 22 #include "intel_pps.h" 23 #include "intel_snps_phy.h" 24 #include "intel_tc.h" 25 #include "intel_vga.h" 26 #include "vlv_sideband.h" 27 28 struct i915_power_well_ops { 29 /* 30 * Synchronize the well's hw state to match the current sw state, for 31 * example enable/disable it based on the current refcount. Called 32 * during driver init and resume time, possibly after first calling 33 * the enable/disable handlers. 34 */ 35 void (*sync_hw)(struct drm_i915_private *dev_priv, 36 struct i915_power_well *power_well); 37 /* 38 * Enable the well and resources that depend on it (for example 39 * interrupts located on the well). Called after the 0->1 refcount 40 * transition. 41 */ 42 void (*enable)(struct drm_i915_private *dev_priv, 43 struct i915_power_well *power_well); 44 /* 45 * Disable the well and resources that depend on it. Called after 46 * the 1->0 refcount transition. 47 */ 48 void (*disable)(struct drm_i915_private *dev_priv, 49 struct i915_power_well *power_well); 50 /* Returns the hw enabled state. */ 51 bool (*is_enabled)(struct drm_i915_private *dev_priv, 52 struct i915_power_well *power_well); 53 }; 54 55 struct i915_power_well_regs { 56 i915_reg_t bios; 57 i915_reg_t driver; 58 i915_reg_t kvmr; 59 i915_reg_t debug; 60 }; 61 62 /* Power well structure for haswell */ 63 struct i915_power_well_desc { 64 const char *name; 65 bool always_on; 66 u64 domains; 67 /* unique identifier for this power well */ 68 enum i915_power_well_id id; 69 /* 70 * Arbitraty data associated with this power well. Platform and power 71 * well specific. 72 */ 73 union { 74 struct { 75 /* 76 * request/status flag index in the PUNIT power well 77 * control/status registers. 78 */ 79 u8 idx; 80 } vlv; 81 struct { 82 enum dpio_phy phy; 83 } bxt; 84 struct { 85 const struct i915_power_well_regs *regs; 86 /* 87 * request/status flag index in the power well 88 * constrol/status registers. 89 */ 90 u8 idx; 91 /* Mask of pipes whose IRQ logic is backed by the pw */ 92 u8 irq_pipe_mask; 93 /* 94 * Instead of waiting for the status bit to ack enables, 95 * just wait a specific amount of time and then consider 96 * the well enabled. 97 */ 98 u16 fixed_enable_delay; 99 /* The pw is backing the VGA functionality */ 100 bool has_vga:1; 101 bool has_fuses:1; 102 /* 103 * The pw is for an ICL+ TypeC PHY port in 104 * Thunderbolt mode. 105 */ 106 bool is_tc_tbt:1; 107 } hsw; 108 }; 109 const struct i915_power_well_ops *ops; 110 }; 111 112 struct i915_power_well { 113 const struct i915_power_well_desc *desc; 114 /* power well enable/disable usage count */ 115 int count; 116 /* cached hw enabled state */ 117 bool hw_enabled; 118 }; 119 120 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 121 enum i915_power_well_id power_well_id); 122 123 const char * 124 intel_display_power_domain_str(enum intel_display_power_domain domain) 125 { 126 switch (domain) { 127 case POWER_DOMAIN_DISPLAY_CORE: 128 return "DISPLAY_CORE"; 129 case POWER_DOMAIN_PIPE_A: 130 return "PIPE_A"; 131 case POWER_DOMAIN_PIPE_B: 132 return "PIPE_B"; 133 case POWER_DOMAIN_PIPE_C: 134 return "PIPE_C"; 135 case POWER_DOMAIN_PIPE_D: 136 return "PIPE_D"; 137 case POWER_DOMAIN_PIPE_A_PANEL_FITTER: 138 return "PIPE_A_PANEL_FITTER"; 139 case POWER_DOMAIN_PIPE_B_PANEL_FITTER: 140 return "PIPE_B_PANEL_FITTER"; 141 case POWER_DOMAIN_PIPE_C_PANEL_FITTER: 142 return "PIPE_C_PANEL_FITTER"; 143 case POWER_DOMAIN_PIPE_D_PANEL_FITTER: 144 return "PIPE_D_PANEL_FITTER"; 145 case POWER_DOMAIN_TRANSCODER_A: 146 return "TRANSCODER_A"; 147 case POWER_DOMAIN_TRANSCODER_B: 148 return "TRANSCODER_B"; 149 case POWER_DOMAIN_TRANSCODER_C: 150 return "TRANSCODER_C"; 151 case POWER_DOMAIN_TRANSCODER_D: 152 return "TRANSCODER_D"; 153 case POWER_DOMAIN_TRANSCODER_EDP: 154 return "TRANSCODER_EDP"; 155 case POWER_DOMAIN_TRANSCODER_VDSC_PW2: 156 return "TRANSCODER_VDSC_PW2"; 157 case POWER_DOMAIN_TRANSCODER_DSI_A: 158 return "TRANSCODER_DSI_A"; 159 case POWER_DOMAIN_TRANSCODER_DSI_C: 160 return "TRANSCODER_DSI_C"; 161 case POWER_DOMAIN_PORT_DDI_A_LANES: 162 return "PORT_DDI_A_LANES"; 163 case POWER_DOMAIN_PORT_DDI_B_LANES: 164 return "PORT_DDI_B_LANES"; 165 case POWER_DOMAIN_PORT_DDI_C_LANES: 166 return "PORT_DDI_C_LANES"; 167 case POWER_DOMAIN_PORT_DDI_D_LANES: 168 return "PORT_DDI_D_LANES"; 169 case POWER_DOMAIN_PORT_DDI_E_LANES: 170 return "PORT_DDI_E_LANES"; 171 case POWER_DOMAIN_PORT_DDI_F_LANES: 172 return "PORT_DDI_F_LANES"; 173 case POWER_DOMAIN_PORT_DDI_G_LANES: 174 return "PORT_DDI_G_LANES"; 175 case POWER_DOMAIN_PORT_DDI_H_LANES: 176 return "PORT_DDI_H_LANES"; 177 case POWER_DOMAIN_PORT_DDI_I_LANES: 178 return "PORT_DDI_I_LANES"; 179 case POWER_DOMAIN_PORT_DDI_A_IO: 180 return "PORT_DDI_A_IO"; 181 case POWER_DOMAIN_PORT_DDI_B_IO: 182 return "PORT_DDI_B_IO"; 183 case POWER_DOMAIN_PORT_DDI_C_IO: 184 return "PORT_DDI_C_IO"; 185 case POWER_DOMAIN_PORT_DDI_D_IO: 186 return "PORT_DDI_D_IO"; 187 case POWER_DOMAIN_PORT_DDI_E_IO: 188 return "PORT_DDI_E_IO"; 189 case POWER_DOMAIN_PORT_DDI_F_IO: 190 return "PORT_DDI_F_IO"; 191 case POWER_DOMAIN_PORT_DDI_G_IO: 192 return "PORT_DDI_G_IO"; 193 case POWER_DOMAIN_PORT_DDI_H_IO: 194 return "PORT_DDI_H_IO"; 195 case POWER_DOMAIN_PORT_DDI_I_IO: 196 return "PORT_DDI_I_IO"; 197 case POWER_DOMAIN_PORT_DSI: 198 return "PORT_DSI"; 199 case POWER_DOMAIN_PORT_CRT: 200 return "PORT_CRT"; 201 case POWER_DOMAIN_PORT_OTHER: 202 return "PORT_OTHER"; 203 case POWER_DOMAIN_VGA: 204 return "VGA"; 205 case POWER_DOMAIN_AUDIO_MMIO: 206 return "AUDIO_MMIO"; 207 case POWER_DOMAIN_AUDIO_PLAYBACK: 208 return "AUDIO_PLAYBACK"; 209 case POWER_DOMAIN_AUX_A: 210 return "AUX_A"; 211 case POWER_DOMAIN_AUX_B: 212 return "AUX_B"; 213 case POWER_DOMAIN_AUX_C: 214 return "AUX_C"; 215 case POWER_DOMAIN_AUX_D: 216 return "AUX_D"; 217 case POWER_DOMAIN_AUX_E: 218 return "AUX_E"; 219 case POWER_DOMAIN_AUX_F: 220 return "AUX_F"; 221 case POWER_DOMAIN_AUX_G: 222 return "AUX_G"; 223 case POWER_DOMAIN_AUX_H: 224 return "AUX_H"; 225 case POWER_DOMAIN_AUX_I: 226 return "AUX_I"; 227 case POWER_DOMAIN_AUX_IO_A: 228 return "AUX_IO_A"; 229 case POWER_DOMAIN_AUX_C_TBT: 230 return "AUX_C_TBT"; 231 case POWER_DOMAIN_AUX_D_TBT: 232 return "AUX_D_TBT"; 233 case POWER_DOMAIN_AUX_E_TBT: 234 return "AUX_E_TBT"; 235 case POWER_DOMAIN_AUX_F_TBT: 236 return "AUX_F_TBT"; 237 case POWER_DOMAIN_AUX_G_TBT: 238 return "AUX_G_TBT"; 239 case POWER_DOMAIN_AUX_H_TBT: 240 return "AUX_H_TBT"; 241 case POWER_DOMAIN_AUX_I_TBT: 242 return "AUX_I_TBT"; 243 case POWER_DOMAIN_GMBUS: 244 return "GMBUS"; 245 case POWER_DOMAIN_INIT: 246 return "INIT"; 247 case POWER_DOMAIN_MODESET: 248 return "MODESET"; 249 case POWER_DOMAIN_GT_IRQ: 250 return "GT_IRQ"; 251 case POWER_DOMAIN_DC_OFF: 252 return "DC_OFF"; 253 case POWER_DOMAIN_TC_COLD_OFF: 254 return "TC_COLD_OFF"; 255 default: 256 MISSING_CASE(domain); 257 return "?"; 258 } 259 } 260 261 static void intel_power_well_enable(struct drm_i915_private *dev_priv, 262 struct i915_power_well *power_well) 263 { 264 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name); 265 power_well->desc->ops->enable(dev_priv, power_well); 266 power_well->hw_enabled = true; 267 } 268 269 static void intel_power_well_disable(struct drm_i915_private *dev_priv, 270 struct i915_power_well *power_well) 271 { 272 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name); 273 power_well->hw_enabled = false; 274 power_well->desc->ops->disable(dev_priv, power_well); 275 } 276 277 static void intel_power_well_get(struct drm_i915_private *dev_priv, 278 struct i915_power_well *power_well) 279 { 280 if (!power_well->count++) 281 intel_power_well_enable(dev_priv, power_well); 282 } 283 284 static void intel_power_well_put(struct drm_i915_private *dev_priv, 285 struct i915_power_well *power_well) 286 { 287 drm_WARN(&dev_priv->drm, !power_well->count, 288 "Use count on power well %s is already zero", 289 power_well->desc->name); 290 291 if (!--power_well->count) 292 intel_power_well_disable(dev_priv, power_well); 293 } 294 295 /** 296 * __intel_display_power_is_enabled - unlocked check for a power domain 297 * @dev_priv: i915 device instance 298 * @domain: power domain to check 299 * 300 * This is the unlocked version of intel_display_power_is_enabled() and should 301 * only be used from error capture and recovery code where deadlocks are 302 * possible. 303 * 304 * Returns: 305 * True when the power domain is enabled, false otherwise. 306 */ 307 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 308 enum intel_display_power_domain domain) 309 { 310 struct i915_power_well *power_well; 311 bool is_enabled; 312 313 if (dev_priv->runtime_pm.suspended) 314 return false; 315 316 is_enabled = true; 317 318 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) { 319 if (power_well->desc->always_on) 320 continue; 321 322 if (!power_well->hw_enabled) { 323 is_enabled = false; 324 break; 325 } 326 } 327 328 return is_enabled; 329 } 330 331 /** 332 * intel_display_power_is_enabled - check for a power domain 333 * @dev_priv: i915 device instance 334 * @domain: power domain to check 335 * 336 * This function can be used to check the hw power domain state. It is mostly 337 * used in hardware state readout functions. Everywhere else code should rely 338 * upon explicit power domain reference counting to ensure that the hardware 339 * block is powered up before accessing it. 340 * 341 * Callers must hold the relevant modesetting locks to ensure that concurrent 342 * threads can't disable the power well while the caller tries to read a few 343 * registers. 344 * 345 * Returns: 346 * True when the power domain is enabled, false otherwise. 347 */ 348 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 349 enum intel_display_power_domain domain) 350 { 351 struct i915_power_domains *power_domains; 352 bool ret; 353 354 power_domains = &dev_priv->power_domains; 355 356 mutex_lock(&power_domains->lock); 357 ret = __intel_display_power_is_enabled(dev_priv, domain); 358 mutex_unlock(&power_domains->lock); 359 360 return ret; 361 } 362 363 /* 364 * Starting with Haswell, we have a "Power Down Well" that can be turned off 365 * when not needed anymore. We have 4 registers that can request the power well 366 * to be enabled, and it will only be disabled if none of the registers is 367 * requesting it to be enabled. 368 */ 369 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, 370 u8 irq_pipe_mask, bool has_vga) 371 { 372 if (has_vga) 373 intel_vga_reset_io_mem(dev_priv); 374 375 if (irq_pipe_mask) 376 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); 377 } 378 379 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, 380 u8 irq_pipe_mask) 381 { 382 if (irq_pipe_mask) 383 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); 384 } 385 386 #define ICL_AUX_PW_TO_CH(pw_idx) \ 387 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) 388 389 #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ 390 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) 391 392 static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well) 393 { 394 int pw_idx = power_well->desc->hsw.idx; 395 396 return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) : 397 ICL_AUX_PW_TO_CH(pw_idx); 398 } 399 400 static struct intel_digital_port * 401 aux_ch_to_digital_port(struct drm_i915_private *dev_priv, 402 enum aux_ch aux_ch) 403 { 404 struct intel_digital_port *dig_port = NULL; 405 struct intel_encoder *encoder; 406 407 for_each_intel_encoder(&dev_priv->drm, encoder) { 408 /* We'll check the MST primary port */ 409 if (encoder->type == INTEL_OUTPUT_DP_MST) 410 continue; 411 412 dig_port = enc_to_dig_port(encoder); 413 if (!dig_port) 414 continue; 415 416 if (dig_port->aux_ch != aux_ch) { 417 dig_port = NULL; 418 continue; 419 } 420 421 break; 422 } 423 424 return dig_port; 425 } 426 427 static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915, 428 const struct i915_power_well *power_well) 429 { 430 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); 431 struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch); 432 433 return intel_port_to_phy(i915, dig_port->base.port); 434 } 435 436 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, 437 struct i915_power_well *power_well, 438 bool timeout_expected) 439 { 440 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 441 int pw_idx = power_well->desc->hsw.idx; 442 int enable_delay = power_well->desc->hsw.fixed_enable_delay; 443 444 /* 445 * For some power wells we're not supposed to watch the status bit for 446 * an ack, but rather just wait a fixed amount of time and then 447 * proceed. This is only used on DG2. 448 */ 449 if (IS_DG2(dev_priv) && enable_delay) { 450 usleep_range(enable_delay, 2 * enable_delay); 451 return; 452 } 453 454 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ 455 if (intel_de_wait_for_set(dev_priv, regs->driver, 456 HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) { 457 drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n", 458 power_well->desc->name); 459 460 drm_WARN_ON(&dev_priv->drm, !timeout_expected); 461 462 } 463 } 464 465 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, 466 const struct i915_power_well_regs *regs, 467 int pw_idx) 468 { 469 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); 470 u32 ret; 471 472 ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0; 473 ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0; 474 if (regs->kvmr.reg) 475 ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0; 476 ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0; 477 478 return ret; 479 } 480 481 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, 482 struct i915_power_well *power_well) 483 { 484 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 485 int pw_idx = power_well->desc->hsw.idx; 486 bool disabled; 487 u32 reqs; 488 489 /* 490 * Bspec doesn't require waiting for PWs to get disabled, but still do 491 * this for paranoia. The known cases where a PW will be forced on: 492 * - a KVMR request on any power well via the KVMR request register 493 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and 494 * DEBUG request registers 495 * Skip the wait in case any of the request bits are set and print a 496 * diagnostic message. 497 */ 498 wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) & 499 HSW_PWR_WELL_CTL_STATE(pw_idx))) || 500 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); 501 if (disabled) 502 return; 503 504 drm_dbg_kms(&dev_priv->drm, 505 "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", 506 power_well->desc->name, 507 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); 508 } 509 510 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, 511 enum skl_power_gate pg) 512 { 513 /* Timeout 5us for PG#0, for other PGs 1us */ 514 drm_WARN_ON(&dev_priv->drm, 515 intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS, 516 SKL_FUSE_PG_DIST_STATUS(pg), 1)); 517 } 518 519 static void hsw_power_well_enable(struct drm_i915_private *dev_priv, 520 struct i915_power_well *power_well) 521 { 522 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 523 int pw_idx = power_well->desc->hsw.idx; 524 u32 val; 525 526 if (power_well->desc->hsw.has_fuses) { 527 enum skl_power_gate pg; 528 529 pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : 530 SKL_PW_CTL_IDX_TO_PG(pw_idx); 531 532 /* Wa_16013190616:adlp */ 533 if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1) 534 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC); 535 536 /* 537 * For PW1 we have to wait both for the PW0/PG0 fuse state 538 * before enabling the power well and PW1/PG1's own fuse 539 * state after the enabling. For all other power wells with 540 * fuses we only have to wait for that PW/PG's fuse state 541 * after the enabling. 542 */ 543 if (pg == SKL_PG1) 544 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); 545 } 546 547 val = intel_de_read(dev_priv, regs->driver); 548 intel_de_write(dev_priv, regs->driver, 549 val | HSW_PWR_WELL_CTL_REQ(pw_idx)); 550 551 hsw_wait_for_power_well_enable(dev_priv, power_well, false); 552 553 if (power_well->desc->hsw.has_fuses) { 554 enum skl_power_gate pg; 555 556 pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : 557 SKL_PW_CTL_IDX_TO_PG(pw_idx); 558 gen9_wait_for_power_well_fuses(dev_priv, pg); 559 } 560 561 hsw_power_well_post_enable(dev_priv, 562 power_well->desc->hsw.irq_pipe_mask, 563 power_well->desc->hsw.has_vga); 564 } 565 566 static void hsw_power_well_disable(struct drm_i915_private *dev_priv, 567 struct i915_power_well *power_well) 568 { 569 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 570 int pw_idx = power_well->desc->hsw.idx; 571 u32 val; 572 573 hsw_power_well_pre_disable(dev_priv, 574 power_well->desc->hsw.irq_pipe_mask); 575 576 val = intel_de_read(dev_priv, regs->driver); 577 intel_de_write(dev_priv, regs->driver, 578 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); 579 hsw_wait_for_power_well_disable(dev_priv, power_well); 580 } 581 582 static void 583 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, 584 struct i915_power_well *power_well) 585 { 586 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 587 int pw_idx = power_well->desc->hsw.idx; 588 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); 589 u32 val; 590 591 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); 592 593 val = intel_de_read(dev_priv, regs->driver); 594 intel_de_write(dev_priv, regs->driver, 595 val | HSW_PWR_WELL_CTL_REQ(pw_idx)); 596 597 if (DISPLAY_VER(dev_priv) < 12) { 598 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); 599 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), 600 val | ICL_LANE_ENABLE_AUX); 601 } 602 603 hsw_wait_for_power_well_enable(dev_priv, power_well, false); 604 605 /* Display WA #1178: icl */ 606 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && 607 !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { 608 val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx)); 609 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; 610 intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val); 611 } 612 } 613 614 static void 615 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, 616 struct i915_power_well *power_well) 617 { 618 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 619 int pw_idx = power_well->desc->hsw.idx; 620 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); 621 u32 val; 622 623 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); 624 625 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); 626 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), 627 val & ~ICL_LANE_ENABLE_AUX); 628 629 val = intel_de_read(dev_priv, regs->driver); 630 intel_de_write(dev_priv, regs->driver, 631 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); 632 633 hsw_wait_for_power_well_disable(dev_priv, power_well); 634 } 635 636 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 637 638 static u64 async_put_domains_mask(struct i915_power_domains *power_domains); 639 640 static int power_well_async_ref_count(struct drm_i915_private *dev_priv, 641 struct i915_power_well *power_well) 642 { 643 int refs = hweight64(power_well->desc->domains & 644 async_put_domains_mask(&dev_priv->power_domains)); 645 646 drm_WARN_ON(&dev_priv->drm, refs > power_well->count); 647 648 return refs; 649 } 650 651 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, 652 struct i915_power_well *power_well, 653 struct intel_digital_port *dig_port) 654 { 655 /* Bypass the check if all references are released asynchronously */ 656 if (power_well_async_ref_count(dev_priv, power_well) == 657 power_well->count) 658 return; 659 660 if (drm_WARN_ON(&dev_priv->drm, !dig_port)) 661 return; 662 663 if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) 664 return; 665 666 drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port)); 667 } 668 669 #else 670 671 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, 672 struct i915_power_well *power_well, 673 struct intel_digital_port *dig_port) 674 { 675 } 676 677 #endif 678 679 #define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) 680 681 static void icl_tc_cold_exit(struct drm_i915_private *i915) 682 { 683 int ret, tries = 0; 684 685 while (1) { 686 ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0, 687 250, 1); 688 if (ret != -EAGAIN || ++tries == 3) 689 break; 690 msleep(1); 691 } 692 693 /* Spec states that TC cold exit can take up to 1ms to complete */ 694 if (!ret) 695 msleep(1); 696 697 /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */ 698 drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" : 699 "succeeded"); 700 } 701 702 static void 703 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, 704 struct i915_power_well *power_well) 705 { 706 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); 707 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); 708 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 709 bool is_tbt = power_well->desc->hsw.is_tc_tbt; 710 bool timeout_expected; 711 u32 val; 712 713 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); 714 715 val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)); 716 val &= ~DP_AUX_CH_CTL_TBT_IO; 717 if (is_tbt) 718 val |= DP_AUX_CH_CTL_TBT_IO; 719 intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val); 720 721 val = intel_de_read(dev_priv, regs->driver); 722 intel_de_write(dev_priv, regs->driver, 723 val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx)); 724 725 /* 726 * An AUX timeout is expected if the TBT DP tunnel is down, 727 * or need to enable AUX on a legacy TypeC port as part of the TC-cold 728 * exit sequence. 729 */ 730 timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port); 731 if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) 732 icl_tc_cold_exit(dev_priv); 733 734 hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected); 735 736 if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) { 737 enum tc_port tc_port; 738 739 tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx); 740 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), 741 HIP_INDEX_VAL(tc_port, 0x2)); 742 743 if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), 744 DKL_CMN_UC_DW27_UC_HEALTH, 1)) 745 drm_warn(&dev_priv->drm, 746 "Timeout waiting TC uC health\n"); 747 } 748 } 749 750 static void 751 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, 752 struct i915_power_well *power_well) 753 { 754 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); 755 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); 756 757 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); 758 759 hsw_power_well_disable(dev_priv, power_well); 760 } 761 762 static void 763 icl_aux_power_well_enable(struct drm_i915_private *dev_priv, 764 struct i915_power_well *power_well) 765 { 766 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); 767 768 if (intel_phy_is_tc(dev_priv, phy)) 769 return icl_tc_phy_aux_power_well_enable(dev_priv, power_well); 770 else if (IS_ICELAKE(dev_priv)) 771 return icl_combo_phy_aux_power_well_enable(dev_priv, 772 power_well); 773 else 774 return hsw_power_well_enable(dev_priv, power_well); 775 } 776 777 static void 778 icl_aux_power_well_disable(struct drm_i915_private *dev_priv, 779 struct i915_power_well *power_well) 780 { 781 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); 782 783 if (intel_phy_is_tc(dev_priv, phy)) 784 return icl_tc_phy_aux_power_well_disable(dev_priv, power_well); 785 else if (IS_ICELAKE(dev_priv)) 786 return icl_combo_phy_aux_power_well_disable(dev_priv, 787 power_well); 788 else 789 return hsw_power_well_disable(dev_priv, power_well); 790 } 791 792 /* 793 * We should only use the power well if we explicitly asked the hardware to 794 * enable it, so check if it's enabled and also check if we've requested it to 795 * be enabled. 796 */ 797 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, 798 struct i915_power_well *power_well) 799 { 800 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 801 enum i915_power_well_id id = power_well->desc->id; 802 int pw_idx = power_well->desc->hsw.idx; 803 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | 804 HSW_PWR_WELL_CTL_STATE(pw_idx); 805 u32 val; 806 807 val = intel_de_read(dev_priv, regs->driver); 808 809 /* 810 * On GEN9 big core due to a DMC bug the driver's request bits for PW1 811 * and the MISC_IO PW will be not restored, so check instead for the 812 * BIOS's own request bits, which are forced-on for these power wells 813 * when exiting DC5/6. 814 */ 815 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) && 816 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) 817 val |= intel_de_read(dev_priv, regs->bios); 818 819 return (val & mask) == mask; 820 } 821 822 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) 823 { 824 drm_WARN_ONCE(&dev_priv->drm, 825 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9), 826 "DC9 already programmed to be enabled.\n"); 827 drm_WARN_ONCE(&dev_priv->drm, 828 intel_de_read(dev_priv, DC_STATE_EN) & 829 DC_STATE_EN_UPTO_DC5, 830 "DC5 still not disabled to enable DC9.\n"); 831 drm_WARN_ONCE(&dev_priv->drm, 832 intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) & 833 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), 834 "Power well 2 on.\n"); 835 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), 836 "Interrupts not disabled yet.\n"); 837 838 /* 839 * TODO: check for the following to verify the conditions to enter DC9 840 * state are satisfied: 841 * 1] Check relevant display engine registers to verify if mode set 842 * disable sequence was followed. 843 * 2] Check if display uninitialize sequence is initialized. 844 */ 845 } 846 847 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) 848 { 849 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), 850 "Interrupts not disabled yet.\n"); 851 drm_WARN_ONCE(&dev_priv->drm, 852 intel_de_read(dev_priv, DC_STATE_EN) & 853 DC_STATE_EN_UPTO_DC5, 854 "DC5 still not disabled.\n"); 855 856 /* 857 * TODO: check for the following to verify DC9 state was indeed 858 * entered before programming to disable it: 859 * 1] Check relevant display engine registers to verify if mode 860 * set disable sequence was followed. 861 * 2] Check if display uninitialize sequence is initialized. 862 */ 863 } 864 865 static void gen9_write_dc_state(struct drm_i915_private *dev_priv, 866 u32 state) 867 { 868 int rewrites = 0; 869 int rereads = 0; 870 u32 v; 871 872 intel_de_write(dev_priv, DC_STATE_EN, state); 873 874 /* It has been observed that disabling the dc6 state sometimes 875 * doesn't stick and dmc keeps returning old value. Make sure 876 * the write really sticks enough times and also force rewrite until 877 * we are confident that state is exactly what we want. 878 */ 879 do { 880 v = intel_de_read(dev_priv, DC_STATE_EN); 881 882 if (v != state) { 883 intel_de_write(dev_priv, DC_STATE_EN, state); 884 rewrites++; 885 rereads = 0; 886 } else if (rereads++ > 5) { 887 break; 888 } 889 890 } while (rewrites < 100); 891 892 if (v != state) 893 drm_err(&dev_priv->drm, 894 "Writing dc state to 0x%x failed, now 0x%x\n", 895 state, v); 896 897 /* Most of the times we need one retry, avoid spam */ 898 if (rewrites > 1) 899 drm_dbg_kms(&dev_priv->drm, 900 "Rewrote dc state to 0x%x %d times\n", 901 state, rewrites); 902 } 903 904 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) 905 { 906 u32 mask; 907 908 mask = DC_STATE_EN_UPTO_DC5; 909 910 if (DISPLAY_VER(dev_priv) >= 12) 911 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 912 | DC_STATE_EN_DC9; 913 else if (DISPLAY_VER(dev_priv) == 11) 914 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; 915 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 916 mask |= DC_STATE_EN_DC9; 917 else 918 mask |= DC_STATE_EN_UPTO_DC6; 919 920 return mask; 921 } 922 923 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) 924 { 925 u32 val; 926 927 if (!HAS_DISPLAY(dev_priv)) 928 return; 929 930 val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv); 931 932 drm_dbg_kms(&dev_priv->drm, 933 "Resetting DC state tracking from %02x to %02x\n", 934 dev_priv->dmc.dc_state, val); 935 dev_priv->dmc.dc_state = val; 936 } 937 938 /** 939 * gen9_set_dc_state - set target display C power state 940 * @dev_priv: i915 device instance 941 * @state: target DC power state 942 * - DC_STATE_DISABLE 943 * - DC_STATE_EN_UPTO_DC5 944 * - DC_STATE_EN_UPTO_DC6 945 * - DC_STATE_EN_DC9 946 * 947 * Signal to DMC firmware/HW the target DC power state passed in @state. 948 * DMC/HW can turn off individual display clocks and power rails when entering 949 * a deeper DC power state (higher in number) and turns these back when exiting 950 * that state to a shallower power state (lower in number). The HW will decide 951 * when to actually enter a given state on an on-demand basis, for instance 952 * depending on the active state of display pipes. The state of display 953 * registers backed by affected power rails are saved/restored as needed. 954 * 955 * Based on the above enabling a deeper DC power state is asynchronous wrt. 956 * enabling it. Disabling a deeper power state is synchronous: for instance 957 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned 958 * back on and register state is restored. This is guaranteed by the MMIO write 959 * to DC_STATE_EN blocking until the state is restored. 960 */ 961 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) 962 { 963 u32 val; 964 u32 mask; 965 966 if (!HAS_DISPLAY(dev_priv)) 967 return; 968 969 if (drm_WARN_ON_ONCE(&dev_priv->drm, 970 state & ~dev_priv->dmc.allowed_dc_mask)) 971 state &= dev_priv->dmc.allowed_dc_mask; 972 973 val = intel_de_read(dev_priv, DC_STATE_EN); 974 mask = gen9_dc_mask(dev_priv); 975 drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n", 976 val & mask, state); 977 978 /* Check if DMC is ignoring our DC state requests */ 979 if ((val & mask) != dev_priv->dmc.dc_state) 980 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", 981 dev_priv->dmc.dc_state, val & mask); 982 983 val &= ~mask; 984 val |= state; 985 986 gen9_write_dc_state(dev_priv, val); 987 988 dev_priv->dmc.dc_state = val & mask; 989 } 990 991 static u32 992 sanitize_target_dc_state(struct drm_i915_private *dev_priv, 993 u32 target_dc_state) 994 { 995 static const u32 states[] = { 996 DC_STATE_EN_UPTO_DC6, 997 DC_STATE_EN_UPTO_DC5, 998 DC_STATE_EN_DC3CO, 999 DC_STATE_DISABLE, 1000 }; 1001 int i; 1002 1003 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { 1004 if (target_dc_state != states[i]) 1005 continue; 1006 1007 if (dev_priv->dmc.allowed_dc_mask & target_dc_state) 1008 break; 1009 1010 target_dc_state = states[i + 1]; 1011 } 1012 1013 return target_dc_state; 1014 } 1015 1016 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) 1017 { 1018 drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n"); 1019 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); 1020 } 1021 1022 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) 1023 { 1024 u32 val; 1025 1026 drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); 1027 val = intel_de_read(dev_priv, DC_STATE_EN); 1028 val &= ~DC_STATE_DC3CO_STATUS; 1029 intel_de_write(dev_priv, DC_STATE_EN, val); 1030 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 1031 /* 1032 * Delay of 200us DC3CO Exit time B.Spec 49196 1033 */ 1034 usleep_range(200, 210); 1035 } 1036 1037 static void bxt_enable_dc9(struct drm_i915_private *dev_priv) 1038 { 1039 assert_can_enable_dc9(dev_priv); 1040 1041 drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n"); 1042 /* 1043 * Power sequencer reset is not needed on 1044 * platforms with South Display Engine on PCH, 1045 * because PPS registers are always on. 1046 */ 1047 if (!HAS_PCH_SPLIT(dev_priv)) 1048 intel_pps_reset_all(dev_priv); 1049 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); 1050 } 1051 1052 static void bxt_disable_dc9(struct drm_i915_private *dev_priv) 1053 { 1054 assert_can_disable_dc9(dev_priv); 1055 1056 drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n"); 1057 1058 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 1059 1060 intel_pps_unlock_regs_wa(dev_priv); 1061 } 1062 1063 static void assert_dmc_loaded(struct drm_i915_private *dev_priv) 1064 { 1065 drm_WARN_ONCE(&dev_priv->drm, 1066 !intel_de_read(dev_priv, 1067 DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), 1068 "DMC program storage start is NULL\n"); 1069 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE), 1070 "DMC SSP Base Not fine\n"); 1071 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL), 1072 "DMC HTP Not fine\n"); 1073 } 1074 1075 static struct i915_power_well * 1076 lookup_power_well(struct drm_i915_private *dev_priv, 1077 enum i915_power_well_id power_well_id) 1078 { 1079 struct i915_power_well *power_well; 1080 1081 for_each_power_well(dev_priv, power_well) 1082 if (power_well->desc->id == power_well_id) 1083 return power_well; 1084 1085 /* 1086 * It's not feasible to add error checking code to the callers since 1087 * this condition really shouldn't happen and it doesn't even make sense 1088 * to abort things like display initialization sequences. Just return 1089 * the first power well and hope the WARN gets reported so we can fix 1090 * our driver. 1091 */ 1092 drm_WARN(&dev_priv->drm, 1, 1093 "Power well %d not defined for this platform\n", 1094 power_well_id); 1095 return &dev_priv->power_domains.power_wells[0]; 1096 } 1097 1098 /** 1099 * intel_display_power_set_target_dc_state - Set target dc state. 1100 * @dev_priv: i915 device 1101 * @state: state which needs to be set as target_dc_state. 1102 * 1103 * This function set the "DC off" power well target_dc_state, 1104 * based upon this target_dc_stste, "DC off" power well will 1105 * enable desired DC state. 1106 */ 1107 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, 1108 u32 state) 1109 { 1110 struct i915_power_well *power_well; 1111 bool dc_off_enabled; 1112 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1113 1114 mutex_lock(&power_domains->lock); 1115 power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF); 1116 1117 if (drm_WARN_ON(&dev_priv->drm, !power_well)) 1118 goto unlock; 1119 1120 state = sanitize_target_dc_state(dev_priv, state); 1121 1122 if (state == dev_priv->dmc.target_dc_state) 1123 goto unlock; 1124 1125 dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv, 1126 power_well); 1127 /* 1128 * If DC off power well is disabled, need to enable and disable the 1129 * DC off power well to effect target DC state. 1130 */ 1131 if (!dc_off_enabled) 1132 power_well->desc->ops->enable(dev_priv, power_well); 1133 1134 dev_priv->dmc.target_dc_state = state; 1135 1136 if (!dc_off_enabled) 1137 power_well->desc->ops->disable(dev_priv, power_well); 1138 1139 unlock: 1140 mutex_unlock(&power_domains->lock); 1141 } 1142 1143 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) 1144 { 1145 enum i915_power_well_id high_pg; 1146 1147 /* Power wells at this level and above must be disabled for DC5 entry */ 1148 if (DISPLAY_VER(dev_priv) == 12) 1149 high_pg = ICL_DISP_PW_3; 1150 else 1151 high_pg = SKL_DISP_PW_2; 1152 1153 drm_WARN_ONCE(&dev_priv->drm, 1154 intel_display_power_well_is_enabled(dev_priv, high_pg), 1155 "Power wells above platform's DC5 limit still enabled.\n"); 1156 1157 drm_WARN_ONCE(&dev_priv->drm, 1158 (intel_de_read(dev_priv, DC_STATE_EN) & 1159 DC_STATE_EN_UPTO_DC5), 1160 "DC5 already programmed to be enabled.\n"); 1161 assert_rpm_wakelock_held(&dev_priv->runtime_pm); 1162 1163 assert_dmc_loaded(dev_priv); 1164 } 1165 1166 static void gen9_enable_dc5(struct drm_i915_private *dev_priv) 1167 { 1168 assert_can_enable_dc5(dev_priv); 1169 1170 drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n"); 1171 1172 /* Wa Display #1183: skl,kbl,cfl */ 1173 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) 1174 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, 1175 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); 1176 1177 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); 1178 } 1179 1180 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) 1181 { 1182 drm_WARN_ONCE(&dev_priv->drm, 1183 intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 1184 "Backlight is not disabled.\n"); 1185 drm_WARN_ONCE(&dev_priv->drm, 1186 (intel_de_read(dev_priv, DC_STATE_EN) & 1187 DC_STATE_EN_UPTO_DC6), 1188 "DC6 already programmed to be enabled.\n"); 1189 1190 assert_dmc_loaded(dev_priv); 1191 } 1192 1193 static void skl_enable_dc6(struct drm_i915_private *dev_priv) 1194 { 1195 assert_can_enable_dc6(dev_priv); 1196 1197 drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n"); 1198 1199 /* Wa Display #1183: skl,kbl,cfl */ 1200 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) 1201 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, 1202 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); 1203 1204 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 1205 } 1206 1207 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, 1208 struct i915_power_well *power_well) 1209 { 1210 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 1211 int pw_idx = power_well->desc->hsw.idx; 1212 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); 1213 u32 bios_req = intel_de_read(dev_priv, regs->bios); 1214 1215 /* Take over the request bit if set by BIOS. */ 1216 if (bios_req & mask) { 1217 u32 drv_req = intel_de_read(dev_priv, regs->driver); 1218 1219 if (!(drv_req & mask)) 1220 intel_de_write(dev_priv, regs->driver, drv_req | mask); 1221 intel_de_write(dev_priv, regs->bios, bios_req & ~mask); 1222 } 1223 } 1224 1225 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1226 struct i915_power_well *power_well) 1227 { 1228 bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy); 1229 } 1230 1231 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1232 struct i915_power_well *power_well) 1233 { 1234 bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy); 1235 } 1236 1237 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, 1238 struct i915_power_well *power_well) 1239 { 1240 return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy); 1241 } 1242 1243 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) 1244 { 1245 struct i915_power_well *power_well; 1246 1247 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); 1248 if (power_well->count > 0) 1249 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); 1250 1251 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 1252 if (power_well->count > 0) 1253 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); 1254 1255 if (IS_GEMINILAKE(dev_priv)) { 1256 power_well = lookup_power_well(dev_priv, 1257 GLK_DISP_PW_DPIO_CMN_C); 1258 if (power_well->count > 0) 1259 bxt_ddi_phy_verify_state(dev_priv, 1260 power_well->desc->bxt.phy); 1261 } 1262 } 1263 1264 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, 1265 struct i915_power_well *power_well) 1266 { 1267 return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && 1268 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); 1269 } 1270 1271 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) 1272 { 1273 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv); 1274 u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices; 1275 1276 drm_WARN(&dev_priv->drm, 1277 hw_enabled_dbuf_slices != enabled_dbuf_slices, 1278 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n", 1279 hw_enabled_dbuf_slices, 1280 enabled_dbuf_slices); 1281 } 1282 1283 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv) 1284 { 1285 struct intel_cdclk_config cdclk_config = {}; 1286 1287 if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) { 1288 tgl_disable_dc3co(dev_priv); 1289 return; 1290 } 1291 1292 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 1293 1294 if (!HAS_DISPLAY(dev_priv)) 1295 return; 1296 1297 intel_cdclk_get_cdclk(dev_priv, &cdclk_config); 1298 /* Can't read out voltage_level so can't use intel_cdclk_changed() */ 1299 drm_WARN_ON(&dev_priv->drm, 1300 intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, 1301 &cdclk_config)); 1302 1303 gen9_assert_dbuf_enabled(dev_priv); 1304 1305 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 1306 bxt_verify_ddi_phy_power_wells(dev_priv); 1307 1308 if (DISPLAY_VER(dev_priv) >= 11) 1309 /* 1310 * DMC retains HW context only for port A, the other combo 1311 * PHY's HW context for port B is lost after DC transitions, 1312 * so we need to restore it manually. 1313 */ 1314 intel_combo_phy_init(dev_priv); 1315 } 1316 1317 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, 1318 struct i915_power_well *power_well) 1319 { 1320 gen9_disable_dc_states(dev_priv); 1321 } 1322 1323 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, 1324 struct i915_power_well *power_well) 1325 { 1326 if (!intel_dmc_has_payload(dev_priv)) 1327 return; 1328 1329 switch (dev_priv->dmc.target_dc_state) { 1330 case DC_STATE_EN_DC3CO: 1331 tgl_enable_dc3co(dev_priv); 1332 break; 1333 case DC_STATE_EN_UPTO_DC6: 1334 skl_enable_dc6(dev_priv); 1335 break; 1336 case DC_STATE_EN_UPTO_DC5: 1337 gen9_enable_dc5(dev_priv); 1338 break; 1339 } 1340 } 1341 1342 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, 1343 struct i915_power_well *power_well) 1344 { 1345 } 1346 1347 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, 1348 struct i915_power_well *power_well) 1349 { 1350 } 1351 1352 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, 1353 struct i915_power_well *power_well) 1354 { 1355 return true; 1356 } 1357 1358 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, 1359 struct i915_power_well *power_well) 1360 { 1361 if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) 1362 i830_enable_pipe(dev_priv, PIPE_A); 1363 if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) 1364 i830_enable_pipe(dev_priv, PIPE_B); 1365 } 1366 1367 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, 1368 struct i915_power_well *power_well) 1369 { 1370 i830_disable_pipe(dev_priv, PIPE_B); 1371 i830_disable_pipe(dev_priv, PIPE_A); 1372 } 1373 1374 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, 1375 struct i915_power_well *power_well) 1376 { 1377 return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE && 1378 intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; 1379 } 1380 1381 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, 1382 struct i915_power_well *power_well) 1383 { 1384 if (power_well->count > 0) 1385 i830_pipes_power_well_enable(dev_priv, power_well); 1386 else 1387 i830_pipes_power_well_disable(dev_priv, power_well); 1388 } 1389 1390 static void vlv_set_power_well(struct drm_i915_private *dev_priv, 1391 struct i915_power_well *power_well, bool enable) 1392 { 1393 int pw_idx = power_well->desc->vlv.idx; 1394 u32 mask; 1395 u32 state; 1396 u32 ctrl; 1397 1398 mask = PUNIT_PWRGT_MASK(pw_idx); 1399 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) : 1400 PUNIT_PWRGT_PWR_GATE(pw_idx); 1401 1402 vlv_punit_get(dev_priv); 1403 1404 #define COND \ 1405 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) 1406 1407 if (COND) 1408 goto out; 1409 1410 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); 1411 ctrl &= ~mask; 1412 ctrl |= state; 1413 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); 1414 1415 if (wait_for(COND, 100)) 1416 drm_err(&dev_priv->drm, 1417 "timeout setting power well state %08x (%08x)\n", 1418 state, 1419 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); 1420 1421 #undef COND 1422 1423 out: 1424 vlv_punit_put(dev_priv); 1425 } 1426 1427 static void vlv_power_well_enable(struct drm_i915_private *dev_priv, 1428 struct i915_power_well *power_well) 1429 { 1430 vlv_set_power_well(dev_priv, power_well, true); 1431 } 1432 1433 static void vlv_power_well_disable(struct drm_i915_private *dev_priv, 1434 struct i915_power_well *power_well) 1435 { 1436 vlv_set_power_well(dev_priv, power_well, false); 1437 } 1438 1439 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, 1440 struct i915_power_well *power_well) 1441 { 1442 int pw_idx = power_well->desc->vlv.idx; 1443 bool enabled = false; 1444 u32 mask; 1445 u32 state; 1446 u32 ctrl; 1447 1448 mask = PUNIT_PWRGT_MASK(pw_idx); 1449 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); 1450 1451 vlv_punit_get(dev_priv); 1452 1453 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; 1454 /* 1455 * We only ever set the power-on and power-gate states, anything 1456 * else is unexpected. 1457 */ 1458 drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) && 1459 state != PUNIT_PWRGT_PWR_GATE(pw_idx)); 1460 if (state == ctrl) 1461 enabled = true; 1462 1463 /* 1464 * A transient state at this point would mean some unexpected party 1465 * is poking at the power controls too. 1466 */ 1467 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; 1468 drm_WARN_ON(&dev_priv->drm, ctrl != state); 1469 1470 vlv_punit_put(dev_priv); 1471 1472 return enabled; 1473 } 1474 1475 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) 1476 { 1477 u32 val; 1478 1479 /* 1480 * On driver load, a pipe may be active and driving a DSI display. 1481 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck 1482 * (and never recovering) in this case. intel_dsi_post_disable() will 1483 * clear it when we turn off the display. 1484 */ 1485 val = intel_de_read(dev_priv, DSPCLK_GATE_D); 1486 val &= DPOUNIT_CLOCK_GATE_DISABLE; 1487 val |= VRHUNIT_CLOCK_GATE_DISABLE; 1488 intel_de_write(dev_priv, DSPCLK_GATE_D, val); 1489 1490 /* 1491 * Disable trickle feed and enable pnd deadline calculation 1492 */ 1493 intel_de_write(dev_priv, MI_ARB_VLV, 1494 MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 1495 intel_de_write(dev_priv, CBR1_VLV, 0); 1496 1497 drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0); 1498 intel_de_write(dev_priv, RAWCLK_FREQ_VLV, 1499 DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 1500 1000)); 1501 } 1502 1503 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) 1504 { 1505 struct intel_encoder *encoder; 1506 enum pipe pipe; 1507 1508 /* 1509 * Enable the CRI clock source so we can get at the 1510 * display and the reference clock for VGA 1511 * hotplug / manual detection. Supposedly DSI also 1512 * needs the ref clock up and running. 1513 * 1514 * CHV DPLL B/C have some issues if VGA mode is enabled. 1515 */ 1516 for_each_pipe(dev_priv, pipe) { 1517 u32 val = intel_de_read(dev_priv, DPLL(pipe)); 1518 1519 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1520 if (pipe != PIPE_A) 1521 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1522 1523 intel_de_write(dev_priv, DPLL(pipe), val); 1524 } 1525 1526 vlv_init_display_clock_gating(dev_priv); 1527 1528 spin_lock_irq(&dev_priv->irq_lock); 1529 valleyview_enable_display_irqs(dev_priv); 1530 spin_unlock_irq(&dev_priv->irq_lock); 1531 1532 /* 1533 * During driver initialization/resume we can avoid restoring the 1534 * part of the HW/SW state that will be inited anyway explicitly. 1535 */ 1536 if (dev_priv->power_domains.initializing) 1537 return; 1538 1539 intel_hpd_init(dev_priv); 1540 intel_hpd_poll_disable(dev_priv); 1541 1542 /* Re-enable the ADPA, if we have one */ 1543 for_each_intel_encoder(&dev_priv->drm, encoder) { 1544 if (encoder->type == INTEL_OUTPUT_ANALOG) 1545 intel_crt_reset(&encoder->base); 1546 } 1547 1548 intel_vga_redisable_power_on(dev_priv); 1549 1550 intel_pps_unlock_regs_wa(dev_priv); 1551 } 1552 1553 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) 1554 { 1555 spin_lock_irq(&dev_priv->irq_lock); 1556 valleyview_disable_display_irqs(dev_priv); 1557 spin_unlock_irq(&dev_priv->irq_lock); 1558 1559 /* make sure we're done processing display irqs */ 1560 intel_synchronize_irq(dev_priv); 1561 1562 intel_pps_reset_all(dev_priv); 1563 1564 /* Prevent us from re-enabling polling on accident in late suspend */ 1565 if (!dev_priv->drm.dev->power.is_suspended) 1566 intel_hpd_poll_enable(dev_priv); 1567 } 1568 1569 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, 1570 struct i915_power_well *power_well) 1571 { 1572 vlv_set_power_well(dev_priv, power_well, true); 1573 1574 vlv_display_power_well_init(dev_priv); 1575 } 1576 1577 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, 1578 struct i915_power_well *power_well) 1579 { 1580 vlv_display_power_well_deinit(dev_priv); 1581 1582 vlv_set_power_well(dev_priv, power_well, false); 1583 } 1584 1585 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1586 struct i915_power_well *power_well) 1587 { 1588 /* since ref/cri clock was enabled */ 1589 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1590 1591 vlv_set_power_well(dev_priv, power_well, true); 1592 1593 /* 1594 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - 1595 * 6. De-assert cmn_reset/side_reset. Same as VLV X0. 1596 * a. GUnit 0x2110 bit[0] set to 1 (def 0) 1597 * b. The other bits such as sfr settings / modesel may all 1598 * be set to 0. 1599 * 1600 * This should only be done on init and resume from S3 with 1601 * both PLLs disabled, or we risk losing DPIO and PLL 1602 * synchronization. 1603 */ 1604 intel_de_write(dev_priv, DPIO_CTL, 1605 intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST); 1606 } 1607 1608 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1609 struct i915_power_well *power_well) 1610 { 1611 enum pipe pipe; 1612 1613 for_each_pipe(dev_priv, pipe) 1614 assert_pll_disabled(dev_priv, pipe); 1615 1616 /* Assert common reset */ 1617 intel_de_write(dev_priv, DPIO_CTL, 1618 intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST); 1619 1620 vlv_set_power_well(dev_priv, power_well, false); 1621 } 1622 1623 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) 1624 1625 #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) 1626 1627 static void assert_chv_phy_status(struct drm_i915_private *dev_priv) 1628 { 1629 struct i915_power_well *cmn_bc = 1630 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 1631 struct i915_power_well *cmn_d = 1632 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 1633 u32 phy_control = dev_priv->chv_phy_control; 1634 u32 phy_status = 0; 1635 u32 phy_status_mask = 0xffffffff; 1636 1637 /* 1638 * The BIOS can leave the PHY is some weird state 1639 * where it doesn't fully power down some parts. 1640 * Disable the asserts until the PHY has been fully 1641 * reset (ie. the power well has been disabled at 1642 * least once). 1643 */ 1644 if (!dev_priv->chv_phy_assert[DPIO_PHY0]) 1645 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | 1646 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | 1647 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | 1648 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | 1649 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | 1650 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); 1651 1652 if (!dev_priv->chv_phy_assert[DPIO_PHY1]) 1653 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | 1654 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | 1655 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); 1656 1657 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { 1658 phy_status |= PHY_POWERGOOD(DPIO_PHY0); 1659 1660 /* this assumes override is only used to enable lanes */ 1661 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) 1662 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); 1663 1664 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) 1665 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); 1666 1667 /* CL1 is on whenever anything is on in either channel */ 1668 if (BITS_SET(phy_control, 1669 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | 1670 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) 1671 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); 1672 1673 /* 1674 * The DPLLB check accounts for the pipe B + port A usage 1675 * with CL2 powered up but all the lanes in the second channel 1676 * powered down. 1677 */ 1678 if (BITS_SET(phy_control, 1679 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && 1680 (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) 1681 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); 1682 1683 if (BITS_SET(phy_control, 1684 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) 1685 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); 1686 if (BITS_SET(phy_control, 1687 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) 1688 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); 1689 1690 if (BITS_SET(phy_control, 1691 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) 1692 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); 1693 if (BITS_SET(phy_control, 1694 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) 1695 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); 1696 } 1697 1698 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { 1699 phy_status |= PHY_POWERGOOD(DPIO_PHY1); 1700 1701 /* this assumes override is only used to enable lanes */ 1702 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) 1703 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); 1704 1705 if (BITS_SET(phy_control, 1706 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) 1707 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); 1708 1709 if (BITS_SET(phy_control, 1710 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) 1711 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); 1712 if (BITS_SET(phy_control, 1713 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) 1714 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); 1715 } 1716 1717 phy_status &= phy_status_mask; 1718 1719 /* 1720 * The PHY may be busy with some initial calibration and whatnot, 1721 * so the power state can take a while to actually change. 1722 */ 1723 if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS, 1724 phy_status_mask, phy_status, 10)) 1725 drm_err(&dev_priv->drm, 1726 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", 1727 intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, 1728 phy_status, dev_priv->chv_phy_control); 1729 } 1730 1731 #undef BITS_SET 1732 1733 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1734 struct i915_power_well *power_well) 1735 { 1736 enum dpio_phy phy; 1737 enum pipe pipe; 1738 u32 tmp; 1739 1740 drm_WARN_ON_ONCE(&dev_priv->drm, 1741 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && 1742 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); 1743 1744 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1745 pipe = PIPE_A; 1746 phy = DPIO_PHY0; 1747 } else { 1748 pipe = PIPE_C; 1749 phy = DPIO_PHY1; 1750 } 1751 1752 /* since ref/cri clock was enabled */ 1753 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1754 vlv_set_power_well(dev_priv, power_well, true); 1755 1756 /* Poll for phypwrgood signal */ 1757 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, 1758 PHY_POWERGOOD(phy), 1)) 1759 drm_err(&dev_priv->drm, "Display PHY %d is not power up\n", 1760 phy); 1761 1762 vlv_dpio_get(dev_priv); 1763 1764 /* Enable dynamic power down */ 1765 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); 1766 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | 1767 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; 1768 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); 1769 1770 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1771 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); 1772 tmp |= DPIO_DYNPWRDOWNEN_CH1; 1773 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); 1774 } else { 1775 /* 1776 * Force the non-existing CL2 off. BXT does this 1777 * too, so maybe it saves some power even though 1778 * CL2 doesn't exist? 1779 */ 1780 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); 1781 tmp |= DPIO_CL2_LDOFUSE_PWRENB; 1782 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); 1783 } 1784 1785 vlv_dpio_put(dev_priv); 1786 1787 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); 1788 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1789 dev_priv->chv_phy_control); 1790 1791 drm_dbg_kms(&dev_priv->drm, 1792 "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1793 phy, dev_priv->chv_phy_control); 1794 1795 assert_chv_phy_status(dev_priv); 1796 } 1797 1798 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1799 struct i915_power_well *power_well) 1800 { 1801 enum dpio_phy phy; 1802 1803 drm_WARN_ON_ONCE(&dev_priv->drm, 1804 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && 1805 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); 1806 1807 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1808 phy = DPIO_PHY0; 1809 assert_pll_disabled(dev_priv, PIPE_A); 1810 assert_pll_disabled(dev_priv, PIPE_B); 1811 } else { 1812 phy = DPIO_PHY1; 1813 assert_pll_disabled(dev_priv, PIPE_C); 1814 } 1815 1816 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); 1817 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1818 dev_priv->chv_phy_control); 1819 1820 vlv_set_power_well(dev_priv, power_well, false); 1821 1822 drm_dbg_kms(&dev_priv->drm, 1823 "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1824 phy, dev_priv->chv_phy_control); 1825 1826 /* PHY is fully reset now, so we can enable the PHY state asserts */ 1827 dev_priv->chv_phy_assert[phy] = true; 1828 1829 assert_chv_phy_status(dev_priv); 1830 } 1831 1832 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1833 enum dpio_channel ch, bool override, unsigned int mask) 1834 { 1835 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; 1836 u32 reg, val, expected, actual; 1837 1838 /* 1839 * The BIOS can leave the PHY is some weird state 1840 * where it doesn't fully power down some parts. 1841 * Disable the asserts until the PHY has been fully 1842 * reset (ie. the power well has been disabled at 1843 * least once). 1844 */ 1845 if (!dev_priv->chv_phy_assert[phy]) 1846 return; 1847 1848 if (ch == DPIO_CH0) 1849 reg = _CHV_CMN_DW0_CH0; 1850 else 1851 reg = _CHV_CMN_DW6_CH1; 1852 1853 vlv_dpio_get(dev_priv); 1854 val = vlv_dpio_read(dev_priv, pipe, reg); 1855 vlv_dpio_put(dev_priv); 1856 1857 /* 1858 * This assumes !override is only used when the port is disabled. 1859 * All lanes should power down even without the override when 1860 * the port is disabled. 1861 */ 1862 if (!override || mask == 0xf) { 1863 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1864 /* 1865 * If CH1 common lane is not active anymore 1866 * (eg. for pipe B DPLL) the entire channel will 1867 * shut down, which causes the common lane registers 1868 * to read as 0. That means we can't actually check 1869 * the lane power down status bits, but as the entire 1870 * register reads as 0 it's a good indication that the 1871 * channel is indeed entirely powered down. 1872 */ 1873 if (ch == DPIO_CH1 && val == 0) 1874 expected = 0; 1875 } else if (mask != 0x0) { 1876 expected = DPIO_ANYDL_POWERDOWN; 1877 } else { 1878 expected = 0; 1879 } 1880 1881 if (ch == DPIO_CH0) 1882 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; 1883 else 1884 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; 1885 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1886 1887 drm_WARN(&dev_priv->drm, actual != expected, 1888 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", 1889 !!(actual & DPIO_ALLDL_POWERDOWN), 1890 !!(actual & DPIO_ANYDL_POWERDOWN), 1891 !!(expected & DPIO_ALLDL_POWERDOWN), 1892 !!(expected & DPIO_ANYDL_POWERDOWN), 1893 reg, val); 1894 } 1895 1896 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1897 enum dpio_channel ch, bool override) 1898 { 1899 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1900 bool was_override; 1901 1902 mutex_lock(&power_domains->lock); 1903 1904 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1905 1906 if (override == was_override) 1907 goto out; 1908 1909 if (override) 1910 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1911 else 1912 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1913 1914 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1915 dev_priv->chv_phy_control); 1916 1917 drm_dbg_kms(&dev_priv->drm, 1918 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", 1919 phy, ch, dev_priv->chv_phy_control); 1920 1921 assert_chv_phy_status(dev_priv); 1922 1923 out: 1924 mutex_unlock(&power_domains->lock); 1925 1926 return was_override; 1927 } 1928 1929 void chv_phy_powergate_lanes(struct intel_encoder *encoder, 1930 bool override, unsigned int mask) 1931 { 1932 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1933 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1934 enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); 1935 enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); 1936 1937 mutex_lock(&power_domains->lock); 1938 1939 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); 1940 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); 1941 1942 if (override) 1943 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1944 else 1945 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1946 1947 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1948 dev_priv->chv_phy_control); 1949 1950 drm_dbg_kms(&dev_priv->drm, 1951 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", 1952 phy, ch, mask, dev_priv->chv_phy_control); 1953 1954 assert_chv_phy_status(dev_priv); 1955 1956 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); 1957 1958 mutex_unlock(&power_domains->lock); 1959 } 1960 1961 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, 1962 struct i915_power_well *power_well) 1963 { 1964 enum pipe pipe = PIPE_A; 1965 bool enabled; 1966 u32 state, ctrl; 1967 1968 vlv_punit_get(dev_priv); 1969 1970 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); 1971 /* 1972 * We only ever set the power-on and power-gate states, anything 1973 * else is unexpected. 1974 */ 1975 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && 1976 state != DP_SSS_PWR_GATE(pipe)); 1977 enabled = state == DP_SSS_PWR_ON(pipe); 1978 1979 /* 1980 * A transient state at this point would mean some unexpected party 1981 * is poking at the power controls too. 1982 */ 1983 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); 1984 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); 1985 1986 vlv_punit_put(dev_priv); 1987 1988 return enabled; 1989 } 1990 1991 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, 1992 struct i915_power_well *power_well, 1993 bool enable) 1994 { 1995 enum pipe pipe = PIPE_A; 1996 u32 state; 1997 u32 ctrl; 1998 1999 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); 2000 2001 vlv_punit_get(dev_priv); 2002 2003 #define COND \ 2004 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) 2005 2006 if (COND) 2007 goto out; 2008 2009 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 2010 ctrl &= ~DP_SSC_MASK(pipe); 2011 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); 2012 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); 2013 2014 if (wait_for(COND, 100)) 2015 drm_err(&dev_priv->drm, 2016 "timeout setting power well state %08x (%08x)\n", 2017 state, 2018 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); 2019 2020 #undef COND 2021 2022 out: 2023 vlv_punit_put(dev_priv); 2024 } 2025 2026 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, 2027 struct i915_power_well *power_well) 2028 { 2029 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 2030 dev_priv->chv_phy_control); 2031 } 2032 2033 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, 2034 struct i915_power_well *power_well) 2035 { 2036 chv_set_pipe_power_well(dev_priv, power_well, true); 2037 2038 vlv_display_power_well_init(dev_priv); 2039 } 2040 2041 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, 2042 struct i915_power_well *power_well) 2043 { 2044 vlv_display_power_well_deinit(dev_priv); 2045 2046 chv_set_pipe_power_well(dev_priv, power_well, false); 2047 } 2048 2049 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) 2050 { 2051 return power_domains->async_put_domains[0] | 2052 power_domains->async_put_domains[1]; 2053 } 2054 2055 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2056 2057 static bool 2058 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 2059 { 2060 struct drm_i915_private *i915 = container_of(power_domains, 2061 struct drm_i915_private, 2062 power_domains); 2063 return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] & 2064 power_domains->async_put_domains[1]); 2065 } 2066 2067 static bool 2068 __async_put_domains_state_ok(struct i915_power_domains *power_domains) 2069 { 2070 struct drm_i915_private *i915 = container_of(power_domains, 2071 struct drm_i915_private, 2072 power_domains); 2073 enum intel_display_power_domain domain; 2074 bool err = false; 2075 2076 err |= !assert_async_put_domain_masks_disjoint(power_domains); 2077 err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref != 2078 !!__async_put_domains_mask(power_domains)); 2079 2080 for_each_power_domain(domain, __async_put_domains_mask(power_domains)) 2081 err |= drm_WARN_ON(&i915->drm, 2082 power_domains->domain_use_count[domain] != 1); 2083 2084 return !err; 2085 } 2086 2087 static void print_power_domains(struct i915_power_domains *power_domains, 2088 const char *prefix, u64 mask) 2089 { 2090 struct drm_i915_private *i915 = container_of(power_domains, 2091 struct drm_i915_private, 2092 power_domains); 2093 enum intel_display_power_domain domain; 2094 2095 drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask)); 2096 for_each_power_domain(domain, mask) 2097 drm_dbg(&i915->drm, "%s use_count %d\n", 2098 intel_display_power_domain_str(domain), 2099 power_domains->domain_use_count[domain]); 2100 } 2101 2102 static void 2103 print_async_put_domains_state(struct i915_power_domains *power_domains) 2104 { 2105 struct drm_i915_private *i915 = container_of(power_domains, 2106 struct drm_i915_private, 2107 power_domains); 2108 2109 drm_dbg(&i915->drm, "async_put_wakeref %u\n", 2110 power_domains->async_put_wakeref); 2111 2112 print_power_domains(power_domains, "async_put_domains[0]", 2113 power_domains->async_put_domains[0]); 2114 print_power_domains(power_domains, "async_put_domains[1]", 2115 power_domains->async_put_domains[1]); 2116 } 2117 2118 static void 2119 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2120 { 2121 if (!__async_put_domains_state_ok(power_domains)) 2122 print_async_put_domains_state(power_domains); 2123 } 2124 2125 #else 2126 2127 static void 2128 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 2129 { 2130 } 2131 2132 static void 2133 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2134 { 2135 } 2136 2137 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ 2138 2139 static u64 async_put_domains_mask(struct i915_power_domains *power_domains) 2140 { 2141 assert_async_put_domain_masks_disjoint(power_domains); 2142 2143 return __async_put_domains_mask(power_domains); 2144 } 2145 2146 static void 2147 async_put_domains_clear_domain(struct i915_power_domains *power_domains, 2148 enum intel_display_power_domain domain) 2149 { 2150 assert_async_put_domain_masks_disjoint(power_domains); 2151 2152 power_domains->async_put_domains[0] &= ~BIT_ULL(domain); 2153 power_domains->async_put_domains[1] &= ~BIT_ULL(domain); 2154 } 2155 2156 static bool 2157 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, 2158 enum intel_display_power_domain domain) 2159 { 2160 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2161 bool ret = false; 2162 2163 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) 2164 goto out_verify; 2165 2166 async_put_domains_clear_domain(power_domains, domain); 2167 2168 ret = true; 2169 2170 if (async_put_domains_mask(power_domains)) 2171 goto out_verify; 2172 2173 cancel_delayed_work(&power_domains->async_put_work); 2174 intel_runtime_pm_put_raw(&dev_priv->runtime_pm, 2175 fetch_and_zero(&power_domains->async_put_wakeref)); 2176 out_verify: 2177 verify_async_put_domains_state(power_domains); 2178 2179 return ret; 2180 } 2181 2182 static void 2183 __intel_display_power_get_domain(struct drm_i915_private *dev_priv, 2184 enum intel_display_power_domain domain) 2185 { 2186 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2187 struct i915_power_well *power_well; 2188 2189 if (intel_display_power_grab_async_put_ref(dev_priv, domain)) 2190 return; 2191 2192 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) 2193 intel_power_well_get(dev_priv, power_well); 2194 2195 power_domains->domain_use_count[domain]++; 2196 } 2197 2198 /** 2199 * intel_display_power_get - grab a power domain reference 2200 * @dev_priv: i915 device instance 2201 * @domain: power domain to reference 2202 * 2203 * This function grabs a power domain reference for @domain and ensures that the 2204 * power domain and all its parents are powered up. Therefore users should only 2205 * grab a reference to the innermost power domain they need. 2206 * 2207 * Any power domain reference obtained by this function must have a symmetric 2208 * call to intel_display_power_put() to release the reference again. 2209 */ 2210 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, 2211 enum intel_display_power_domain domain) 2212 { 2213 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2214 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 2215 2216 mutex_lock(&power_domains->lock); 2217 __intel_display_power_get_domain(dev_priv, domain); 2218 mutex_unlock(&power_domains->lock); 2219 2220 return wakeref; 2221 } 2222 2223 /** 2224 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain 2225 * @dev_priv: i915 device instance 2226 * @domain: power domain to reference 2227 * 2228 * This function grabs a power domain reference for @domain and ensures that the 2229 * power domain and all its parents are powered up. Therefore users should only 2230 * grab a reference to the innermost power domain they need. 2231 * 2232 * Any power domain reference obtained by this function must have a symmetric 2233 * call to intel_display_power_put() to release the reference again. 2234 */ 2235 intel_wakeref_t 2236 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, 2237 enum intel_display_power_domain domain) 2238 { 2239 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2240 intel_wakeref_t wakeref; 2241 bool is_enabled; 2242 2243 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); 2244 if (!wakeref) 2245 return false; 2246 2247 mutex_lock(&power_domains->lock); 2248 2249 if (__intel_display_power_is_enabled(dev_priv, domain)) { 2250 __intel_display_power_get_domain(dev_priv, domain); 2251 is_enabled = true; 2252 } else { 2253 is_enabled = false; 2254 } 2255 2256 mutex_unlock(&power_domains->lock); 2257 2258 if (!is_enabled) { 2259 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2260 wakeref = 0; 2261 } 2262 2263 return wakeref; 2264 } 2265 2266 static void 2267 __intel_display_power_put_domain(struct drm_i915_private *dev_priv, 2268 enum intel_display_power_domain domain) 2269 { 2270 struct i915_power_domains *power_domains; 2271 struct i915_power_well *power_well; 2272 const char *name = intel_display_power_domain_str(domain); 2273 2274 power_domains = &dev_priv->power_domains; 2275 2276 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], 2277 "Use count on domain %s is already zero\n", 2278 name); 2279 drm_WARN(&dev_priv->drm, 2280 async_put_domains_mask(power_domains) & BIT_ULL(domain), 2281 "Async disabling of domain %s is pending\n", 2282 name); 2283 2284 power_domains->domain_use_count[domain]--; 2285 2286 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) 2287 intel_power_well_put(dev_priv, power_well); 2288 } 2289 2290 static void __intel_display_power_put(struct drm_i915_private *dev_priv, 2291 enum intel_display_power_domain domain) 2292 { 2293 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2294 2295 mutex_lock(&power_domains->lock); 2296 __intel_display_power_put_domain(dev_priv, domain); 2297 mutex_unlock(&power_domains->lock); 2298 } 2299 2300 static void 2301 queue_async_put_domains_work(struct i915_power_domains *power_domains, 2302 intel_wakeref_t wakeref) 2303 { 2304 struct drm_i915_private *i915 = container_of(power_domains, 2305 struct drm_i915_private, 2306 power_domains); 2307 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); 2308 power_domains->async_put_wakeref = wakeref; 2309 drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, 2310 &power_domains->async_put_work, 2311 msecs_to_jiffies(100))); 2312 } 2313 2314 static void 2315 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) 2316 { 2317 struct drm_i915_private *dev_priv = 2318 container_of(power_domains, struct drm_i915_private, 2319 power_domains); 2320 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 2321 enum intel_display_power_domain domain; 2322 intel_wakeref_t wakeref; 2323 2324 /* 2325 * The caller must hold already raw wakeref, upgrade that to a proper 2326 * wakeref to make the state checker happy about the HW access during 2327 * power well disabling. 2328 */ 2329 assert_rpm_raw_wakeref_held(rpm); 2330 wakeref = intel_runtime_pm_get(rpm); 2331 2332 for_each_power_domain(domain, mask) { 2333 /* Clear before put, so put's sanity check is happy. */ 2334 async_put_domains_clear_domain(power_domains, domain); 2335 __intel_display_power_put_domain(dev_priv, domain); 2336 } 2337 2338 intel_runtime_pm_put(rpm, wakeref); 2339 } 2340 2341 static void 2342 intel_display_power_put_async_work(struct work_struct *work) 2343 { 2344 struct drm_i915_private *dev_priv = 2345 container_of(work, struct drm_i915_private, 2346 power_domains.async_put_work.work); 2347 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2348 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 2349 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); 2350 intel_wakeref_t old_work_wakeref = 0; 2351 2352 mutex_lock(&power_domains->lock); 2353 2354 /* 2355 * Bail out if all the domain refs pending to be released were grabbed 2356 * by subsequent gets or a flush_work. 2357 */ 2358 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2359 if (!old_work_wakeref) 2360 goto out_verify; 2361 2362 release_async_put_domains(power_domains, 2363 power_domains->async_put_domains[0]); 2364 2365 /* Requeue the work if more domains were async put meanwhile. */ 2366 if (power_domains->async_put_domains[1]) { 2367 power_domains->async_put_domains[0] = 2368 fetch_and_zero(&power_domains->async_put_domains[1]); 2369 queue_async_put_domains_work(power_domains, 2370 fetch_and_zero(&new_work_wakeref)); 2371 } else { 2372 /* 2373 * Cancel the work that got queued after this one got dequeued, 2374 * since here we released the corresponding async-put reference. 2375 */ 2376 cancel_delayed_work(&power_domains->async_put_work); 2377 } 2378 2379 out_verify: 2380 verify_async_put_domains_state(power_domains); 2381 2382 mutex_unlock(&power_domains->lock); 2383 2384 if (old_work_wakeref) 2385 intel_runtime_pm_put_raw(rpm, old_work_wakeref); 2386 if (new_work_wakeref) 2387 intel_runtime_pm_put_raw(rpm, new_work_wakeref); 2388 } 2389 2390 /** 2391 * intel_display_power_put_async - release a power domain reference asynchronously 2392 * @i915: i915 device instance 2393 * @domain: power domain to reference 2394 * @wakeref: wakeref acquired for the reference that is being released 2395 * 2396 * This function drops the power domain reference obtained by 2397 * intel_display_power_get*() and schedules a work to power down the 2398 * corresponding hardware block if this is the last reference. 2399 */ 2400 void __intel_display_power_put_async(struct drm_i915_private *i915, 2401 enum intel_display_power_domain domain, 2402 intel_wakeref_t wakeref) 2403 { 2404 struct i915_power_domains *power_domains = &i915->power_domains; 2405 struct intel_runtime_pm *rpm = &i915->runtime_pm; 2406 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); 2407 2408 mutex_lock(&power_domains->lock); 2409 2410 if (power_domains->domain_use_count[domain] > 1) { 2411 __intel_display_power_put_domain(i915, domain); 2412 2413 goto out_verify; 2414 } 2415 2416 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); 2417 2418 /* Let a pending work requeue itself or queue a new one. */ 2419 if (power_domains->async_put_wakeref) { 2420 power_domains->async_put_domains[1] |= BIT_ULL(domain); 2421 } else { 2422 power_domains->async_put_domains[0] |= BIT_ULL(domain); 2423 queue_async_put_domains_work(power_domains, 2424 fetch_and_zero(&work_wakeref)); 2425 } 2426 2427 out_verify: 2428 verify_async_put_domains_state(power_domains); 2429 2430 mutex_unlock(&power_domains->lock); 2431 2432 if (work_wakeref) 2433 intel_runtime_pm_put_raw(rpm, work_wakeref); 2434 2435 intel_runtime_pm_put(rpm, wakeref); 2436 } 2437 2438 /** 2439 * intel_display_power_flush_work - flushes the async display power disabling work 2440 * @i915: i915 device instance 2441 * 2442 * Flushes any pending work that was scheduled by a preceding 2443 * intel_display_power_put_async() call, completing the disabling of the 2444 * corresponding power domains. 2445 * 2446 * Note that the work handler function may still be running after this 2447 * function returns; to ensure that the work handler isn't running use 2448 * intel_display_power_flush_work_sync() instead. 2449 */ 2450 void intel_display_power_flush_work(struct drm_i915_private *i915) 2451 { 2452 struct i915_power_domains *power_domains = &i915->power_domains; 2453 intel_wakeref_t work_wakeref; 2454 2455 mutex_lock(&power_domains->lock); 2456 2457 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2458 if (!work_wakeref) 2459 goto out_verify; 2460 2461 release_async_put_domains(power_domains, 2462 async_put_domains_mask(power_domains)); 2463 cancel_delayed_work(&power_domains->async_put_work); 2464 2465 out_verify: 2466 verify_async_put_domains_state(power_domains); 2467 2468 mutex_unlock(&power_domains->lock); 2469 2470 if (work_wakeref) 2471 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); 2472 } 2473 2474 /** 2475 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work 2476 * @i915: i915 device instance 2477 * 2478 * Like intel_display_power_flush_work(), but also ensure that the work 2479 * handler function is not running any more when this function returns. 2480 */ 2481 static void 2482 intel_display_power_flush_work_sync(struct drm_i915_private *i915) 2483 { 2484 struct i915_power_domains *power_domains = &i915->power_domains; 2485 2486 intel_display_power_flush_work(i915); 2487 cancel_delayed_work_sync(&power_domains->async_put_work); 2488 2489 verify_async_put_domains_state(power_domains); 2490 2491 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); 2492 } 2493 2494 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2495 /** 2496 * intel_display_power_put - release a power domain reference 2497 * @dev_priv: i915 device instance 2498 * @domain: power domain to reference 2499 * @wakeref: wakeref acquired for the reference that is being released 2500 * 2501 * This function drops the power domain reference obtained by 2502 * intel_display_power_get() and might power down the corresponding hardware 2503 * block right away if this is the last reference. 2504 */ 2505 void intel_display_power_put(struct drm_i915_private *dev_priv, 2506 enum intel_display_power_domain domain, 2507 intel_wakeref_t wakeref) 2508 { 2509 __intel_display_power_put(dev_priv, domain); 2510 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2511 } 2512 #else 2513 /** 2514 * intel_display_power_put_unchecked - release an unchecked power domain reference 2515 * @dev_priv: i915 device instance 2516 * @domain: power domain to reference 2517 * 2518 * This function drops the power domain reference obtained by 2519 * intel_display_power_get() and might power down the corresponding hardware 2520 * block right away if this is the last reference. 2521 * 2522 * This function is only for the power domain code's internal use to suppress wakeref 2523 * tracking when the correspondig debug kconfig option is disabled, should not 2524 * be used otherwise. 2525 */ 2526 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 2527 enum intel_display_power_domain domain) 2528 { 2529 __intel_display_power_put(dev_priv, domain); 2530 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); 2531 } 2532 #endif 2533 2534 void 2535 intel_display_power_get_in_set(struct drm_i915_private *i915, 2536 struct intel_display_power_domain_set *power_domain_set, 2537 enum intel_display_power_domain domain) 2538 { 2539 intel_wakeref_t __maybe_unused wf; 2540 2541 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2542 2543 wf = intel_display_power_get(i915, domain); 2544 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2545 power_domain_set->wakerefs[domain] = wf; 2546 #endif 2547 power_domain_set->mask |= BIT_ULL(domain); 2548 } 2549 2550 bool 2551 intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, 2552 struct intel_display_power_domain_set *power_domain_set, 2553 enum intel_display_power_domain domain) 2554 { 2555 intel_wakeref_t wf; 2556 2557 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2558 2559 wf = intel_display_power_get_if_enabled(i915, domain); 2560 if (!wf) 2561 return false; 2562 2563 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2564 power_domain_set->wakerefs[domain] = wf; 2565 #endif 2566 power_domain_set->mask |= BIT_ULL(domain); 2567 2568 return true; 2569 } 2570 2571 void 2572 intel_display_power_put_mask_in_set(struct drm_i915_private *i915, 2573 struct intel_display_power_domain_set *power_domain_set, 2574 u64 mask) 2575 { 2576 enum intel_display_power_domain domain; 2577 2578 drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask); 2579 2580 for_each_power_domain(domain, mask) { 2581 intel_wakeref_t __maybe_unused wf = -1; 2582 2583 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2584 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); 2585 #endif 2586 intel_display_power_put(i915, domain, wf); 2587 power_domain_set->mask &= ~BIT_ULL(domain); 2588 } 2589 } 2590 2591 #define I830_PIPES_POWER_DOMAINS ( \ 2592 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2593 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2594 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2595 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2596 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2597 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2598 BIT_ULL(POWER_DOMAIN_INIT)) 2599 2600 #define VLV_DISPLAY_POWER_DOMAINS ( \ 2601 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2602 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2603 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2604 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2605 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2606 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2607 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2608 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2609 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2610 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2611 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2612 BIT_ULL(POWER_DOMAIN_VGA) | \ 2613 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2614 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2615 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2616 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2617 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2618 BIT_ULL(POWER_DOMAIN_INIT)) 2619 2620 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2621 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2622 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2623 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2624 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2625 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2626 BIT_ULL(POWER_DOMAIN_INIT)) 2627 2628 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ 2629 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2630 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2631 BIT_ULL(POWER_DOMAIN_INIT)) 2632 2633 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ 2634 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2635 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2636 BIT_ULL(POWER_DOMAIN_INIT)) 2637 2638 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ 2639 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2640 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2641 BIT_ULL(POWER_DOMAIN_INIT)) 2642 2643 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ 2644 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2645 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2646 BIT_ULL(POWER_DOMAIN_INIT)) 2647 2648 #define CHV_DISPLAY_POWER_DOMAINS ( \ 2649 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2650 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2651 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2652 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2653 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2654 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2655 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2656 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2657 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2658 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2659 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2660 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2661 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2662 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2663 BIT_ULL(POWER_DOMAIN_VGA) | \ 2664 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2665 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2666 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2667 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2668 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2669 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2670 BIT_ULL(POWER_DOMAIN_INIT)) 2671 2672 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2673 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2674 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2675 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2676 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2677 BIT_ULL(POWER_DOMAIN_INIT)) 2678 2679 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ 2680 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2681 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2682 BIT_ULL(POWER_DOMAIN_INIT)) 2683 2684 #define HSW_DISPLAY_POWER_DOMAINS ( \ 2685 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2686 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2687 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2688 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2689 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2690 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2691 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2692 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2693 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2694 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2695 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2696 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2697 BIT_ULL(POWER_DOMAIN_VGA) | \ 2698 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2699 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2700 BIT_ULL(POWER_DOMAIN_INIT)) 2701 2702 #define BDW_DISPLAY_POWER_DOMAINS ( \ 2703 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2704 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2705 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2706 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2707 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2708 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2709 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2710 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2711 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2712 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2713 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2714 BIT_ULL(POWER_DOMAIN_VGA) | \ 2715 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2716 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2717 BIT_ULL(POWER_DOMAIN_INIT)) 2718 2719 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2720 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2721 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2722 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2723 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2724 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2725 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2726 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2727 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2728 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2729 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2730 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2731 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2732 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2733 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2734 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2735 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2736 BIT_ULL(POWER_DOMAIN_VGA) | \ 2737 BIT_ULL(POWER_DOMAIN_INIT)) 2738 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ 2739 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2740 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ 2741 BIT_ULL(POWER_DOMAIN_INIT)) 2742 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2743 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2744 BIT_ULL(POWER_DOMAIN_INIT)) 2745 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2746 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2747 BIT_ULL(POWER_DOMAIN_INIT)) 2748 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ 2749 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2750 BIT_ULL(POWER_DOMAIN_INIT)) 2751 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2752 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2753 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2754 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2755 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2756 BIT_ULL(POWER_DOMAIN_INIT)) 2757 2758 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2759 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2760 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2761 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2762 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2763 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2764 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2765 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2766 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2767 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2768 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2769 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2770 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2771 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2772 BIT_ULL(POWER_DOMAIN_VGA) | \ 2773 BIT_ULL(POWER_DOMAIN_INIT)) 2774 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2775 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2776 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2777 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2778 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2779 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2780 BIT_ULL(POWER_DOMAIN_INIT)) 2781 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ 2782 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2783 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2784 BIT_ULL(POWER_DOMAIN_INIT)) 2785 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ 2786 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2787 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2788 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2789 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2790 BIT_ULL(POWER_DOMAIN_INIT)) 2791 2792 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2793 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2794 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2795 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2796 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2797 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2798 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2799 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2800 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2801 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2802 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2803 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2804 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2805 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2806 BIT_ULL(POWER_DOMAIN_VGA) | \ 2807 BIT_ULL(POWER_DOMAIN_INIT)) 2808 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ 2809 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2810 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2811 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2812 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2813 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2814 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ 2815 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2816 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2817 BIT_ULL(POWER_DOMAIN_INIT)) 2818 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ 2819 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2820 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2821 BIT_ULL(POWER_DOMAIN_INIT)) 2822 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ 2823 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2824 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2825 BIT_ULL(POWER_DOMAIN_INIT)) 2826 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2827 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2828 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2829 BIT_ULL(POWER_DOMAIN_INIT)) 2830 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2831 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2832 BIT_ULL(POWER_DOMAIN_INIT)) 2833 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2834 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2835 BIT_ULL(POWER_DOMAIN_INIT)) 2836 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2837 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2838 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2839 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2840 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2841 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2842 BIT_ULL(POWER_DOMAIN_INIT)) 2843 2844 /* 2845 * ICL PW_0/PG_0 domains (HW/DMC control): 2846 * - PCI 2847 * - clocks except port PLL 2848 * - central power except FBC 2849 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers 2850 * ICL PW_1/PG_1 domains (HW/DMC control): 2851 * - DBUF function 2852 * - PIPE_A and its planes, except VGA 2853 * - transcoder EDP + PSR 2854 * - transcoder DSI 2855 * - DDI_A 2856 * - FBC 2857 */ 2858 #define ICL_PW_4_POWER_DOMAINS ( \ 2859 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2860 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2861 BIT_ULL(POWER_DOMAIN_INIT)) 2862 /* VDSC/joining */ 2863 #define ICL_PW_3_POWER_DOMAINS ( \ 2864 ICL_PW_4_POWER_DOMAINS | \ 2865 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2866 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2867 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2868 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2869 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2870 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2871 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2872 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2873 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2874 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2875 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2876 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2877 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2878 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2879 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2880 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ 2881 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2882 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2883 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2884 BIT_ULL(POWER_DOMAIN_VGA) | \ 2885 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2886 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2887 BIT_ULL(POWER_DOMAIN_INIT)) 2888 /* 2889 * - transcoder WD 2890 * - KVMR (HW control) 2891 */ 2892 #define ICL_PW_2_POWER_DOMAINS ( \ 2893 ICL_PW_3_POWER_DOMAINS | \ 2894 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2895 BIT_ULL(POWER_DOMAIN_INIT)) 2896 /* 2897 * - KVMR (HW control) 2898 */ 2899 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2900 ICL_PW_2_POWER_DOMAINS | \ 2901 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2902 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2903 BIT_ULL(POWER_DOMAIN_DC_OFF) | \ 2904 BIT_ULL(POWER_DOMAIN_INIT)) 2905 2906 #define ICL_DDI_IO_A_POWER_DOMAINS ( \ 2907 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2908 #define ICL_DDI_IO_B_POWER_DOMAINS ( \ 2909 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2910 #define ICL_DDI_IO_C_POWER_DOMAINS ( \ 2911 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2912 #define ICL_DDI_IO_D_POWER_DOMAINS ( \ 2913 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2914 #define ICL_DDI_IO_E_POWER_DOMAINS ( \ 2915 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2916 #define ICL_DDI_IO_F_POWER_DOMAINS ( \ 2917 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2918 2919 #define ICL_AUX_A_IO_POWER_DOMAINS ( \ 2920 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2921 BIT_ULL(POWER_DOMAIN_AUX_A)) 2922 #define ICL_AUX_B_IO_POWER_DOMAINS ( \ 2923 BIT_ULL(POWER_DOMAIN_AUX_B)) 2924 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ 2925 BIT_ULL(POWER_DOMAIN_AUX_C)) 2926 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ 2927 BIT_ULL(POWER_DOMAIN_AUX_D)) 2928 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ 2929 BIT_ULL(POWER_DOMAIN_AUX_E)) 2930 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ 2931 BIT_ULL(POWER_DOMAIN_AUX_F)) 2932 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ 2933 BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) 2934 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ 2935 BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) 2936 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ 2937 BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) 2938 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ 2939 BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) 2940 2941 #define TGL_PW_5_POWER_DOMAINS ( \ 2942 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 2943 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 2944 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 2945 BIT_ULL(POWER_DOMAIN_INIT)) 2946 2947 #define TGL_PW_4_POWER_DOMAINS ( \ 2948 TGL_PW_5_POWER_DOMAINS | \ 2949 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2950 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2951 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2952 BIT_ULL(POWER_DOMAIN_INIT)) 2953 2954 #define TGL_PW_3_POWER_DOMAINS ( \ 2955 TGL_PW_4_POWER_DOMAINS | \ 2956 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2957 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2958 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2959 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 2960 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 2961 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ 2962 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ 2963 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \ 2964 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \ 2965 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 2966 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 2967 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 2968 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 2969 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ 2970 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ 2971 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 2972 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 2973 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 2974 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 2975 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ 2976 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ 2977 BIT_ULL(POWER_DOMAIN_VGA) | \ 2978 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2979 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2980 BIT_ULL(POWER_DOMAIN_INIT)) 2981 2982 #define TGL_PW_2_POWER_DOMAINS ( \ 2983 TGL_PW_3_POWER_DOMAINS | \ 2984 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2985 BIT_ULL(POWER_DOMAIN_INIT)) 2986 2987 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2988 TGL_PW_3_POWER_DOMAINS | \ 2989 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2990 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2991 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2992 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2993 BIT_ULL(POWER_DOMAIN_INIT)) 2994 2995 #define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) 2996 #define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) 2997 #define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) 2998 #define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) 2999 #define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5) 3000 #define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6) 3001 3002 #define TGL_AUX_A_IO_POWER_DOMAINS ( \ 3003 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 3004 BIT_ULL(POWER_DOMAIN_AUX_A)) 3005 #define TGL_AUX_B_IO_POWER_DOMAINS ( \ 3006 BIT_ULL(POWER_DOMAIN_AUX_B)) 3007 #define TGL_AUX_C_IO_POWER_DOMAINS ( \ 3008 BIT_ULL(POWER_DOMAIN_AUX_C)) 3009 3010 #define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) 3011 #define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) 3012 #define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) 3013 #define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) 3014 #define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5) 3015 #define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6) 3016 3017 #define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) 3018 #define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) 3019 #define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) 3020 #define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) 3021 #define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5) 3022 #define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6) 3023 3024 #define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ 3025 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3026 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3027 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 3028 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 3029 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ 3030 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ 3031 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 3032 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 3033 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 3034 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 3035 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ 3036 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ 3037 BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) 3038 3039 #define RKL_PW_4_POWER_DOMAINS ( \ 3040 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 3041 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 3042 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 3043 BIT_ULL(POWER_DOMAIN_INIT)) 3044 3045 #define RKL_PW_3_POWER_DOMAINS ( \ 3046 RKL_PW_4_POWER_DOMAINS | \ 3047 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 3048 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 3049 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 3050 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 3051 BIT_ULL(POWER_DOMAIN_VGA) | \ 3052 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 3053 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 3054 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 3055 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3056 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3057 BIT_ULL(POWER_DOMAIN_INIT)) 3058 3059 /* 3060 * There is no PW_2/PG_2 on RKL. 3061 * 3062 * RKL PW_1/PG_1 domains (under HW/DMC control): 3063 * - DBUF function (note: registers are in PW0) 3064 * - PIPE_A and its planes and VDSC/joining, except VGA 3065 * - transcoder A 3066 * - DDI_A and DDI_B 3067 * - FBC 3068 * 3069 * RKL PW_0/PG_0 domains (under HW/DMC control): 3070 * - PCI 3071 * - clocks except port PLL 3072 * - shared functions: 3073 * * interrupts except pipe interrupts 3074 * * MBus except PIPE_MBUS_DBOX_CTL 3075 * * DBUF registers 3076 * - central power except FBC 3077 * - top-level GTC (DDI-level GTC is in the well associated with the DDI) 3078 */ 3079 3080 #define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3081 RKL_PW_3_POWER_DOMAINS | \ 3082 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3083 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3084 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3085 BIT_ULL(POWER_DOMAIN_INIT)) 3086 3087 /* 3088 * DG1 onwards Audio MMIO/VERBS lies in PG0 power well. 3089 */ 3090 #define DG1_PW_3_POWER_DOMAINS ( \ 3091 TGL_PW_4_POWER_DOMAINS | \ 3092 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 3093 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 3094 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 3095 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 3096 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 3097 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3098 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3099 BIT_ULL(POWER_DOMAIN_VGA) | \ 3100 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 3101 BIT_ULL(POWER_DOMAIN_INIT)) 3102 3103 #define DG1_PW_2_POWER_DOMAINS ( \ 3104 DG1_PW_3_POWER_DOMAINS | \ 3105 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 3106 BIT_ULL(POWER_DOMAIN_INIT)) 3107 3108 #define DG1_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3109 DG1_PW_3_POWER_DOMAINS | \ 3110 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 3111 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3112 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3113 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3114 BIT_ULL(POWER_DOMAIN_INIT)) 3115 3116 /* 3117 * XE_LPD Power Domains 3118 * 3119 * Previous platforms required that PG(n-1) be enabled before PG(n). That 3120 * dependency chain turns into a dependency tree on XE_LPD: 3121 * 3122 * PG0 3123 * | 3124 * --PG1-- 3125 * / \ 3126 * PGA --PG2-- 3127 * / | \ 3128 * PGB PGC PGD 3129 * 3130 * Power wells must be enabled from top to bottom and disabled from bottom 3131 * to top. This allows pipes to be power gated independently. 3132 */ 3133 3134 #define XELPD_PW_D_POWER_DOMAINS ( \ 3135 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 3136 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 3137 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 3138 BIT_ULL(POWER_DOMAIN_INIT)) 3139 3140 #define XELPD_PW_C_POWER_DOMAINS ( \ 3141 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 3142 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 3143 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 3144 BIT_ULL(POWER_DOMAIN_INIT)) 3145 3146 #define XELPD_PW_B_POWER_DOMAINS ( \ 3147 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 3148 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 3149 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 3150 BIT_ULL(POWER_DOMAIN_INIT)) 3151 3152 #define XELPD_PW_A_POWER_DOMAINS ( \ 3153 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 3154 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 3155 BIT_ULL(POWER_DOMAIN_INIT)) 3156 3157 #define XELPD_PW_2_POWER_DOMAINS ( \ 3158 XELPD_PW_B_POWER_DOMAINS | \ 3159 XELPD_PW_C_POWER_DOMAINS | \ 3160 XELPD_PW_D_POWER_DOMAINS | \ 3161 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 3162 BIT_ULL(POWER_DOMAIN_VGA) | \ 3163 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 3164 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \ 3165 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \ 3166 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 3167 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 3168 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ 3169 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ 3170 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 3171 BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \ 3172 BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \ 3173 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3174 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3175 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 3176 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 3177 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 3178 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 3179 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 3180 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 3181 BIT_ULL(POWER_DOMAIN_INIT)) 3182 3183 /* 3184 * XELPD PW_1/PG_1 domains (under HW/DMC control): 3185 * - DBUF function (registers are in PW0) 3186 * - Transcoder A 3187 * - DDI_A and DDI_B 3188 * 3189 * XELPD PW_0/PW_1 domains (under HW/DMC control): 3190 * - PCI 3191 * - Clocks except port PLL 3192 * - Shared functions: 3193 * * interrupts except pipe interrupts 3194 * * MBus except PIPE_MBUS_DBOX_CTL 3195 * * DBUF registers 3196 * - Central power except FBC 3197 * - Top-level GTC (DDI-level GTC is in the well associated with the DDI) 3198 */ 3199 3200 #define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3201 XELPD_PW_2_POWER_DOMAINS | \ 3202 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 3203 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3204 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3205 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3206 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 3207 BIT_ULL(POWER_DOMAIN_INIT)) 3208 3209 #define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) 3210 #define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) 3211 #define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) 3212 #define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) 3213 #define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) 3214 #define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) 3215 3216 #define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) 3217 #define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) 3218 #define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) 3219 #define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) 3220 3221 #define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD) 3222 #define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD) 3223 #define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) 3224 #define XELPD_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) 3225 #define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) 3226 #define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) 3227 3228 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 3229 .sync_hw = i9xx_power_well_sync_hw_noop, 3230 .enable = i9xx_always_on_power_well_noop, 3231 .disable = i9xx_always_on_power_well_noop, 3232 .is_enabled = i9xx_always_on_power_well_enabled, 3233 }; 3234 3235 static const struct i915_power_well_ops chv_pipe_power_well_ops = { 3236 .sync_hw = chv_pipe_power_well_sync_hw, 3237 .enable = chv_pipe_power_well_enable, 3238 .disable = chv_pipe_power_well_disable, 3239 .is_enabled = chv_pipe_power_well_enabled, 3240 }; 3241 3242 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { 3243 .sync_hw = i9xx_power_well_sync_hw_noop, 3244 .enable = chv_dpio_cmn_power_well_enable, 3245 .disable = chv_dpio_cmn_power_well_disable, 3246 .is_enabled = vlv_power_well_enabled, 3247 }; 3248 3249 static const struct i915_power_well_desc i9xx_always_on_power_well[] = { 3250 { 3251 .name = "always-on", 3252 .always_on = true, 3253 .domains = POWER_DOMAIN_MASK, 3254 .ops = &i9xx_always_on_power_well_ops, 3255 .id = DISP_PW_ID_NONE, 3256 }, 3257 }; 3258 3259 static const struct i915_power_well_ops i830_pipes_power_well_ops = { 3260 .sync_hw = i830_pipes_power_well_sync_hw, 3261 .enable = i830_pipes_power_well_enable, 3262 .disable = i830_pipes_power_well_disable, 3263 .is_enabled = i830_pipes_power_well_enabled, 3264 }; 3265 3266 static const struct i915_power_well_desc i830_power_wells[] = { 3267 { 3268 .name = "always-on", 3269 .always_on = true, 3270 .domains = POWER_DOMAIN_MASK, 3271 .ops = &i9xx_always_on_power_well_ops, 3272 .id = DISP_PW_ID_NONE, 3273 }, 3274 { 3275 .name = "pipes", 3276 .domains = I830_PIPES_POWER_DOMAINS, 3277 .ops = &i830_pipes_power_well_ops, 3278 .id = DISP_PW_ID_NONE, 3279 }, 3280 }; 3281 3282 static const struct i915_power_well_ops hsw_power_well_ops = { 3283 .sync_hw = hsw_power_well_sync_hw, 3284 .enable = hsw_power_well_enable, 3285 .disable = hsw_power_well_disable, 3286 .is_enabled = hsw_power_well_enabled, 3287 }; 3288 3289 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { 3290 .sync_hw = i9xx_power_well_sync_hw_noop, 3291 .enable = gen9_dc_off_power_well_enable, 3292 .disable = gen9_dc_off_power_well_disable, 3293 .is_enabled = gen9_dc_off_power_well_enabled, 3294 }; 3295 3296 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { 3297 .sync_hw = i9xx_power_well_sync_hw_noop, 3298 .enable = bxt_dpio_cmn_power_well_enable, 3299 .disable = bxt_dpio_cmn_power_well_disable, 3300 .is_enabled = bxt_dpio_cmn_power_well_enabled, 3301 }; 3302 3303 static const struct i915_power_well_regs hsw_power_well_regs = { 3304 .bios = HSW_PWR_WELL_CTL1, 3305 .driver = HSW_PWR_WELL_CTL2, 3306 .kvmr = HSW_PWR_WELL_CTL3, 3307 .debug = HSW_PWR_WELL_CTL4, 3308 }; 3309 3310 static const struct i915_power_well_desc hsw_power_wells[] = { 3311 { 3312 .name = "always-on", 3313 .always_on = true, 3314 .domains = POWER_DOMAIN_MASK, 3315 .ops = &i9xx_always_on_power_well_ops, 3316 .id = DISP_PW_ID_NONE, 3317 }, 3318 { 3319 .name = "display", 3320 .domains = HSW_DISPLAY_POWER_DOMAINS, 3321 .ops = &hsw_power_well_ops, 3322 .id = HSW_DISP_PW_GLOBAL, 3323 { 3324 .hsw.regs = &hsw_power_well_regs, 3325 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3326 .hsw.has_vga = true, 3327 }, 3328 }, 3329 }; 3330 3331 static const struct i915_power_well_desc bdw_power_wells[] = { 3332 { 3333 .name = "always-on", 3334 .always_on = true, 3335 .domains = POWER_DOMAIN_MASK, 3336 .ops = &i9xx_always_on_power_well_ops, 3337 .id = DISP_PW_ID_NONE, 3338 }, 3339 { 3340 .name = "display", 3341 .domains = BDW_DISPLAY_POWER_DOMAINS, 3342 .ops = &hsw_power_well_ops, 3343 .id = HSW_DISP_PW_GLOBAL, 3344 { 3345 .hsw.regs = &hsw_power_well_regs, 3346 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3347 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3348 .hsw.has_vga = true, 3349 }, 3350 }, 3351 }; 3352 3353 static const struct i915_power_well_ops vlv_display_power_well_ops = { 3354 .sync_hw = i9xx_power_well_sync_hw_noop, 3355 .enable = vlv_display_power_well_enable, 3356 .disable = vlv_display_power_well_disable, 3357 .is_enabled = vlv_power_well_enabled, 3358 }; 3359 3360 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { 3361 .sync_hw = i9xx_power_well_sync_hw_noop, 3362 .enable = vlv_dpio_cmn_power_well_enable, 3363 .disable = vlv_dpio_cmn_power_well_disable, 3364 .is_enabled = vlv_power_well_enabled, 3365 }; 3366 3367 static const struct i915_power_well_ops vlv_dpio_power_well_ops = { 3368 .sync_hw = i9xx_power_well_sync_hw_noop, 3369 .enable = vlv_power_well_enable, 3370 .disable = vlv_power_well_disable, 3371 .is_enabled = vlv_power_well_enabled, 3372 }; 3373 3374 static const struct i915_power_well_desc vlv_power_wells[] = { 3375 { 3376 .name = "always-on", 3377 .always_on = true, 3378 .domains = POWER_DOMAIN_MASK, 3379 .ops = &i9xx_always_on_power_well_ops, 3380 .id = DISP_PW_ID_NONE, 3381 }, 3382 { 3383 .name = "display", 3384 .domains = VLV_DISPLAY_POWER_DOMAINS, 3385 .ops = &vlv_display_power_well_ops, 3386 .id = VLV_DISP_PW_DISP2D, 3387 { 3388 .vlv.idx = PUNIT_PWGT_IDX_DISP2D, 3389 }, 3390 }, 3391 { 3392 .name = "dpio-tx-b-01", 3393 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3394 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3395 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3396 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3397 .ops = &vlv_dpio_power_well_ops, 3398 .id = DISP_PW_ID_NONE, 3399 { 3400 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, 3401 }, 3402 }, 3403 { 3404 .name = "dpio-tx-b-23", 3405 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3406 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3407 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3408 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3409 .ops = &vlv_dpio_power_well_ops, 3410 .id = DISP_PW_ID_NONE, 3411 { 3412 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, 3413 }, 3414 }, 3415 { 3416 .name = "dpio-tx-c-01", 3417 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3418 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3419 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3420 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3421 .ops = &vlv_dpio_power_well_ops, 3422 .id = DISP_PW_ID_NONE, 3423 { 3424 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, 3425 }, 3426 }, 3427 { 3428 .name = "dpio-tx-c-23", 3429 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3430 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3431 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3432 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3433 .ops = &vlv_dpio_power_well_ops, 3434 .id = DISP_PW_ID_NONE, 3435 { 3436 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, 3437 }, 3438 }, 3439 { 3440 .name = "dpio-common", 3441 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, 3442 .ops = &vlv_dpio_cmn_power_well_ops, 3443 .id = VLV_DISP_PW_DPIO_CMN_BC, 3444 { 3445 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 3446 }, 3447 }, 3448 }; 3449 3450 static const struct i915_power_well_desc chv_power_wells[] = { 3451 { 3452 .name = "always-on", 3453 .always_on = true, 3454 .domains = POWER_DOMAIN_MASK, 3455 .ops = &i9xx_always_on_power_well_ops, 3456 .id = DISP_PW_ID_NONE, 3457 }, 3458 { 3459 .name = "display", 3460 /* 3461 * Pipe A power well is the new disp2d well. Pipe B and C 3462 * power wells don't actually exist. Pipe A power well is 3463 * required for any pipe to work. 3464 */ 3465 .domains = CHV_DISPLAY_POWER_DOMAINS, 3466 .ops = &chv_pipe_power_well_ops, 3467 .id = DISP_PW_ID_NONE, 3468 }, 3469 { 3470 .name = "dpio-common-bc", 3471 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, 3472 .ops = &chv_dpio_cmn_power_well_ops, 3473 .id = VLV_DISP_PW_DPIO_CMN_BC, 3474 { 3475 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 3476 }, 3477 }, 3478 { 3479 .name = "dpio-common-d", 3480 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, 3481 .ops = &chv_dpio_cmn_power_well_ops, 3482 .id = CHV_DISP_PW_DPIO_CMN_D, 3483 { 3484 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, 3485 }, 3486 }, 3487 }; 3488 3489 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 3490 enum i915_power_well_id power_well_id) 3491 { 3492 struct i915_power_well *power_well; 3493 bool ret; 3494 3495 power_well = lookup_power_well(dev_priv, power_well_id); 3496 ret = power_well->desc->ops->is_enabled(dev_priv, power_well); 3497 3498 return ret; 3499 } 3500 3501 static const struct i915_power_well_desc skl_power_wells[] = { 3502 { 3503 .name = "always-on", 3504 .always_on = true, 3505 .domains = POWER_DOMAIN_MASK, 3506 .ops = &i9xx_always_on_power_well_ops, 3507 .id = DISP_PW_ID_NONE, 3508 }, 3509 { 3510 .name = "power well 1", 3511 /* Handled by the DMC firmware */ 3512 .always_on = true, 3513 .domains = 0, 3514 .ops = &hsw_power_well_ops, 3515 .id = SKL_DISP_PW_1, 3516 { 3517 .hsw.regs = &hsw_power_well_regs, 3518 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3519 .hsw.has_fuses = true, 3520 }, 3521 }, 3522 { 3523 .name = "MISC IO power well", 3524 /* Handled by the DMC firmware */ 3525 .always_on = true, 3526 .domains = 0, 3527 .ops = &hsw_power_well_ops, 3528 .id = SKL_DISP_PW_MISC_IO, 3529 { 3530 .hsw.regs = &hsw_power_well_regs, 3531 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, 3532 }, 3533 }, 3534 { 3535 .name = "DC off", 3536 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, 3537 .ops = &gen9_dc_off_power_well_ops, 3538 .id = SKL_DISP_DC_OFF, 3539 }, 3540 { 3541 .name = "power well 2", 3542 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3543 .ops = &hsw_power_well_ops, 3544 .id = SKL_DISP_PW_2, 3545 { 3546 .hsw.regs = &hsw_power_well_regs, 3547 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3548 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3549 .hsw.has_vga = true, 3550 .hsw.has_fuses = true, 3551 }, 3552 }, 3553 { 3554 .name = "DDI A/E IO power well", 3555 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, 3556 .ops = &hsw_power_well_ops, 3557 .id = DISP_PW_ID_NONE, 3558 { 3559 .hsw.regs = &hsw_power_well_regs, 3560 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, 3561 }, 3562 }, 3563 { 3564 .name = "DDI B IO power well", 3565 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3566 .ops = &hsw_power_well_ops, 3567 .id = DISP_PW_ID_NONE, 3568 { 3569 .hsw.regs = &hsw_power_well_regs, 3570 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3571 }, 3572 }, 3573 { 3574 .name = "DDI C IO power well", 3575 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3576 .ops = &hsw_power_well_ops, 3577 .id = DISP_PW_ID_NONE, 3578 { 3579 .hsw.regs = &hsw_power_well_regs, 3580 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3581 }, 3582 }, 3583 { 3584 .name = "DDI D IO power well", 3585 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, 3586 .ops = &hsw_power_well_ops, 3587 .id = DISP_PW_ID_NONE, 3588 { 3589 .hsw.regs = &hsw_power_well_regs, 3590 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 3591 }, 3592 }, 3593 }; 3594 3595 static const struct i915_power_well_desc bxt_power_wells[] = { 3596 { 3597 .name = "always-on", 3598 .always_on = true, 3599 .domains = POWER_DOMAIN_MASK, 3600 .ops = &i9xx_always_on_power_well_ops, 3601 .id = DISP_PW_ID_NONE, 3602 }, 3603 { 3604 .name = "power well 1", 3605 /* Handled by the DMC firmware */ 3606 .always_on = true, 3607 .domains = 0, 3608 .ops = &hsw_power_well_ops, 3609 .id = SKL_DISP_PW_1, 3610 { 3611 .hsw.regs = &hsw_power_well_regs, 3612 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3613 .hsw.has_fuses = true, 3614 }, 3615 }, 3616 { 3617 .name = "DC off", 3618 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, 3619 .ops = &gen9_dc_off_power_well_ops, 3620 .id = SKL_DISP_DC_OFF, 3621 }, 3622 { 3623 .name = "power well 2", 3624 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3625 .ops = &hsw_power_well_ops, 3626 .id = SKL_DISP_PW_2, 3627 { 3628 .hsw.regs = &hsw_power_well_regs, 3629 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3630 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3631 .hsw.has_vga = true, 3632 .hsw.has_fuses = true, 3633 }, 3634 }, 3635 { 3636 .name = "dpio-common-a", 3637 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, 3638 .ops = &bxt_dpio_cmn_power_well_ops, 3639 .id = BXT_DISP_PW_DPIO_CMN_A, 3640 { 3641 .bxt.phy = DPIO_PHY1, 3642 }, 3643 }, 3644 { 3645 .name = "dpio-common-bc", 3646 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, 3647 .ops = &bxt_dpio_cmn_power_well_ops, 3648 .id = VLV_DISP_PW_DPIO_CMN_BC, 3649 { 3650 .bxt.phy = DPIO_PHY0, 3651 }, 3652 }, 3653 }; 3654 3655 static const struct i915_power_well_desc glk_power_wells[] = { 3656 { 3657 .name = "always-on", 3658 .always_on = true, 3659 .domains = POWER_DOMAIN_MASK, 3660 .ops = &i9xx_always_on_power_well_ops, 3661 .id = DISP_PW_ID_NONE, 3662 }, 3663 { 3664 .name = "power well 1", 3665 /* Handled by the DMC firmware */ 3666 .always_on = true, 3667 .domains = 0, 3668 .ops = &hsw_power_well_ops, 3669 .id = SKL_DISP_PW_1, 3670 { 3671 .hsw.regs = &hsw_power_well_regs, 3672 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3673 .hsw.has_fuses = true, 3674 }, 3675 }, 3676 { 3677 .name = "DC off", 3678 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, 3679 .ops = &gen9_dc_off_power_well_ops, 3680 .id = SKL_DISP_DC_OFF, 3681 }, 3682 { 3683 .name = "power well 2", 3684 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3685 .ops = &hsw_power_well_ops, 3686 .id = SKL_DISP_PW_2, 3687 { 3688 .hsw.regs = &hsw_power_well_regs, 3689 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3690 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3691 .hsw.has_vga = true, 3692 .hsw.has_fuses = true, 3693 }, 3694 }, 3695 { 3696 .name = "dpio-common-a", 3697 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, 3698 .ops = &bxt_dpio_cmn_power_well_ops, 3699 .id = BXT_DISP_PW_DPIO_CMN_A, 3700 { 3701 .bxt.phy = DPIO_PHY1, 3702 }, 3703 }, 3704 { 3705 .name = "dpio-common-b", 3706 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, 3707 .ops = &bxt_dpio_cmn_power_well_ops, 3708 .id = VLV_DISP_PW_DPIO_CMN_BC, 3709 { 3710 .bxt.phy = DPIO_PHY0, 3711 }, 3712 }, 3713 { 3714 .name = "dpio-common-c", 3715 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, 3716 .ops = &bxt_dpio_cmn_power_well_ops, 3717 .id = GLK_DISP_PW_DPIO_CMN_C, 3718 { 3719 .bxt.phy = DPIO_PHY2, 3720 }, 3721 }, 3722 { 3723 .name = "AUX A", 3724 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, 3725 .ops = &hsw_power_well_ops, 3726 .id = DISP_PW_ID_NONE, 3727 { 3728 .hsw.regs = &hsw_power_well_regs, 3729 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3730 }, 3731 }, 3732 { 3733 .name = "AUX B", 3734 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, 3735 .ops = &hsw_power_well_ops, 3736 .id = DISP_PW_ID_NONE, 3737 { 3738 .hsw.regs = &hsw_power_well_regs, 3739 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3740 }, 3741 }, 3742 { 3743 .name = "AUX C", 3744 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, 3745 .ops = &hsw_power_well_ops, 3746 .id = DISP_PW_ID_NONE, 3747 { 3748 .hsw.regs = &hsw_power_well_regs, 3749 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3750 }, 3751 }, 3752 { 3753 .name = "DDI A IO power well", 3754 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, 3755 .ops = &hsw_power_well_ops, 3756 .id = DISP_PW_ID_NONE, 3757 { 3758 .hsw.regs = &hsw_power_well_regs, 3759 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3760 }, 3761 }, 3762 { 3763 .name = "DDI B IO power well", 3764 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3765 .ops = &hsw_power_well_ops, 3766 .id = DISP_PW_ID_NONE, 3767 { 3768 .hsw.regs = &hsw_power_well_regs, 3769 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3770 }, 3771 }, 3772 { 3773 .name = "DDI C IO power well", 3774 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3775 .ops = &hsw_power_well_ops, 3776 .id = DISP_PW_ID_NONE, 3777 { 3778 .hsw.regs = &hsw_power_well_regs, 3779 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3780 }, 3781 }, 3782 }; 3783 3784 static const struct i915_power_well_ops icl_aux_power_well_ops = { 3785 .sync_hw = hsw_power_well_sync_hw, 3786 .enable = icl_aux_power_well_enable, 3787 .disable = icl_aux_power_well_disable, 3788 .is_enabled = hsw_power_well_enabled, 3789 }; 3790 3791 static const struct i915_power_well_regs icl_aux_power_well_regs = { 3792 .bios = ICL_PWR_WELL_CTL_AUX1, 3793 .driver = ICL_PWR_WELL_CTL_AUX2, 3794 .debug = ICL_PWR_WELL_CTL_AUX4, 3795 }; 3796 3797 static const struct i915_power_well_regs icl_ddi_power_well_regs = { 3798 .bios = ICL_PWR_WELL_CTL_DDI1, 3799 .driver = ICL_PWR_WELL_CTL_DDI2, 3800 .debug = ICL_PWR_WELL_CTL_DDI4, 3801 }; 3802 3803 static const struct i915_power_well_desc icl_power_wells[] = { 3804 { 3805 .name = "always-on", 3806 .always_on = true, 3807 .domains = POWER_DOMAIN_MASK, 3808 .ops = &i9xx_always_on_power_well_ops, 3809 .id = DISP_PW_ID_NONE, 3810 }, 3811 { 3812 .name = "power well 1", 3813 /* Handled by the DMC firmware */ 3814 .always_on = true, 3815 .domains = 0, 3816 .ops = &hsw_power_well_ops, 3817 .id = SKL_DISP_PW_1, 3818 { 3819 .hsw.regs = &hsw_power_well_regs, 3820 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3821 .hsw.has_fuses = true, 3822 }, 3823 }, 3824 { 3825 .name = "DC off", 3826 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, 3827 .ops = &gen9_dc_off_power_well_ops, 3828 .id = SKL_DISP_DC_OFF, 3829 }, 3830 { 3831 .name = "power well 2", 3832 .domains = ICL_PW_2_POWER_DOMAINS, 3833 .ops = &hsw_power_well_ops, 3834 .id = SKL_DISP_PW_2, 3835 { 3836 .hsw.regs = &hsw_power_well_regs, 3837 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3838 .hsw.has_fuses = true, 3839 }, 3840 }, 3841 { 3842 .name = "power well 3", 3843 .domains = ICL_PW_3_POWER_DOMAINS, 3844 .ops = &hsw_power_well_ops, 3845 .id = ICL_DISP_PW_3, 3846 { 3847 .hsw.regs = &hsw_power_well_regs, 3848 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3849 .hsw.irq_pipe_mask = BIT(PIPE_B), 3850 .hsw.has_vga = true, 3851 .hsw.has_fuses = true, 3852 }, 3853 }, 3854 { 3855 .name = "DDI A IO", 3856 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3857 .ops = &hsw_power_well_ops, 3858 .id = DISP_PW_ID_NONE, 3859 { 3860 .hsw.regs = &icl_ddi_power_well_regs, 3861 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3862 }, 3863 }, 3864 { 3865 .name = "DDI B IO", 3866 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3867 .ops = &hsw_power_well_ops, 3868 .id = DISP_PW_ID_NONE, 3869 { 3870 .hsw.regs = &icl_ddi_power_well_regs, 3871 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3872 }, 3873 }, 3874 { 3875 .name = "DDI C IO", 3876 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3877 .ops = &hsw_power_well_ops, 3878 .id = DISP_PW_ID_NONE, 3879 { 3880 .hsw.regs = &icl_ddi_power_well_regs, 3881 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3882 }, 3883 }, 3884 { 3885 .name = "DDI D IO", 3886 .domains = ICL_DDI_IO_D_POWER_DOMAINS, 3887 .ops = &hsw_power_well_ops, 3888 .id = DISP_PW_ID_NONE, 3889 { 3890 .hsw.regs = &icl_ddi_power_well_regs, 3891 .hsw.idx = ICL_PW_CTL_IDX_DDI_D, 3892 }, 3893 }, 3894 { 3895 .name = "DDI E IO", 3896 .domains = ICL_DDI_IO_E_POWER_DOMAINS, 3897 .ops = &hsw_power_well_ops, 3898 .id = DISP_PW_ID_NONE, 3899 { 3900 .hsw.regs = &icl_ddi_power_well_regs, 3901 .hsw.idx = ICL_PW_CTL_IDX_DDI_E, 3902 }, 3903 }, 3904 { 3905 .name = "DDI F IO", 3906 .domains = ICL_DDI_IO_F_POWER_DOMAINS, 3907 .ops = &hsw_power_well_ops, 3908 .id = DISP_PW_ID_NONE, 3909 { 3910 .hsw.regs = &icl_ddi_power_well_regs, 3911 .hsw.idx = ICL_PW_CTL_IDX_DDI_F, 3912 }, 3913 }, 3914 { 3915 .name = "AUX A", 3916 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 3917 .ops = &icl_aux_power_well_ops, 3918 .id = DISP_PW_ID_NONE, 3919 { 3920 .hsw.regs = &icl_aux_power_well_regs, 3921 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3922 }, 3923 }, 3924 { 3925 .name = "AUX B", 3926 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 3927 .ops = &icl_aux_power_well_ops, 3928 .id = DISP_PW_ID_NONE, 3929 { 3930 .hsw.regs = &icl_aux_power_well_regs, 3931 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3932 }, 3933 }, 3934 { 3935 .name = "AUX C TC1", 3936 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, 3937 .ops = &icl_aux_power_well_ops, 3938 .id = DISP_PW_ID_NONE, 3939 { 3940 .hsw.regs = &icl_aux_power_well_regs, 3941 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3942 .hsw.is_tc_tbt = false, 3943 }, 3944 }, 3945 { 3946 .name = "AUX D TC2", 3947 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, 3948 .ops = &icl_aux_power_well_ops, 3949 .id = DISP_PW_ID_NONE, 3950 { 3951 .hsw.regs = &icl_aux_power_well_regs, 3952 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 3953 .hsw.is_tc_tbt = false, 3954 }, 3955 }, 3956 { 3957 .name = "AUX E TC3", 3958 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, 3959 .ops = &icl_aux_power_well_ops, 3960 .id = DISP_PW_ID_NONE, 3961 { 3962 .hsw.regs = &icl_aux_power_well_regs, 3963 .hsw.idx = ICL_PW_CTL_IDX_AUX_E, 3964 .hsw.is_tc_tbt = false, 3965 }, 3966 }, 3967 { 3968 .name = "AUX F TC4", 3969 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, 3970 .ops = &icl_aux_power_well_ops, 3971 .id = DISP_PW_ID_NONE, 3972 { 3973 .hsw.regs = &icl_aux_power_well_regs, 3974 .hsw.idx = ICL_PW_CTL_IDX_AUX_F, 3975 .hsw.is_tc_tbt = false, 3976 }, 3977 }, 3978 { 3979 .name = "AUX C TBT1", 3980 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, 3981 .ops = &icl_aux_power_well_ops, 3982 .id = DISP_PW_ID_NONE, 3983 { 3984 .hsw.regs = &icl_aux_power_well_regs, 3985 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, 3986 .hsw.is_tc_tbt = true, 3987 }, 3988 }, 3989 { 3990 .name = "AUX D TBT2", 3991 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, 3992 .ops = &icl_aux_power_well_ops, 3993 .id = DISP_PW_ID_NONE, 3994 { 3995 .hsw.regs = &icl_aux_power_well_regs, 3996 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, 3997 .hsw.is_tc_tbt = true, 3998 }, 3999 }, 4000 { 4001 .name = "AUX E TBT3", 4002 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, 4003 .ops = &icl_aux_power_well_ops, 4004 .id = DISP_PW_ID_NONE, 4005 { 4006 .hsw.regs = &icl_aux_power_well_regs, 4007 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, 4008 .hsw.is_tc_tbt = true, 4009 }, 4010 }, 4011 { 4012 .name = "AUX F TBT4", 4013 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, 4014 .ops = &icl_aux_power_well_ops, 4015 .id = DISP_PW_ID_NONE, 4016 { 4017 .hsw.regs = &icl_aux_power_well_regs, 4018 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, 4019 .hsw.is_tc_tbt = true, 4020 }, 4021 }, 4022 { 4023 .name = "power well 4", 4024 .domains = ICL_PW_4_POWER_DOMAINS, 4025 .ops = &hsw_power_well_ops, 4026 .id = DISP_PW_ID_NONE, 4027 { 4028 .hsw.regs = &hsw_power_well_regs, 4029 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4030 .hsw.has_fuses = true, 4031 .hsw.irq_pipe_mask = BIT(PIPE_C), 4032 }, 4033 }, 4034 }; 4035 4036 static void 4037 tgl_tc_cold_request(struct drm_i915_private *i915, bool block) 4038 { 4039 u8 tries = 0; 4040 int ret; 4041 4042 while (1) { 4043 u32 low_val; 4044 u32 high_val = 0; 4045 4046 if (block) 4047 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; 4048 else 4049 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; 4050 4051 /* 4052 * Spec states that we should timeout the request after 200us 4053 * but the function below will timeout after 500us 4054 */ 4055 ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val); 4056 if (ret == 0) { 4057 if (block && 4058 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) 4059 ret = -EIO; 4060 else 4061 break; 4062 } 4063 4064 if (++tries == 3) 4065 break; 4066 4067 msleep(1); 4068 } 4069 4070 if (ret) 4071 drm_err(&i915->drm, "TC cold %sblock failed\n", 4072 block ? "" : "un"); 4073 else 4074 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", 4075 block ? "" : "un"); 4076 } 4077 4078 static void 4079 tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, 4080 struct i915_power_well *power_well) 4081 { 4082 tgl_tc_cold_request(i915, true); 4083 } 4084 4085 static void 4086 tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, 4087 struct i915_power_well *power_well) 4088 { 4089 tgl_tc_cold_request(i915, false); 4090 } 4091 4092 static void 4093 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, 4094 struct i915_power_well *power_well) 4095 { 4096 if (power_well->count > 0) 4097 tgl_tc_cold_off_power_well_enable(i915, power_well); 4098 else 4099 tgl_tc_cold_off_power_well_disable(i915, power_well); 4100 } 4101 4102 static bool 4103 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, 4104 struct i915_power_well *power_well) 4105 { 4106 /* 4107 * Not the correctly implementation but there is no way to just read it 4108 * from PCODE, so returning count to avoid state mismatch errors 4109 */ 4110 return power_well->count; 4111 } 4112 4113 static const struct i915_power_well_ops tgl_tc_cold_off_ops = { 4114 .sync_hw = tgl_tc_cold_off_power_well_sync_hw, 4115 .enable = tgl_tc_cold_off_power_well_enable, 4116 .disable = tgl_tc_cold_off_power_well_disable, 4117 .is_enabled = tgl_tc_cold_off_power_well_is_enabled, 4118 }; 4119 4120 static const struct i915_power_well_desc tgl_power_wells[] = { 4121 { 4122 .name = "always-on", 4123 .always_on = true, 4124 .domains = POWER_DOMAIN_MASK, 4125 .ops = &i9xx_always_on_power_well_ops, 4126 .id = DISP_PW_ID_NONE, 4127 }, 4128 { 4129 .name = "power well 1", 4130 /* Handled by the DMC firmware */ 4131 .always_on = true, 4132 .domains = 0, 4133 .ops = &hsw_power_well_ops, 4134 .id = SKL_DISP_PW_1, 4135 { 4136 .hsw.regs = &hsw_power_well_regs, 4137 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4138 .hsw.has_fuses = true, 4139 }, 4140 }, 4141 { 4142 .name = "DC off", 4143 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, 4144 .ops = &gen9_dc_off_power_well_ops, 4145 .id = SKL_DISP_DC_OFF, 4146 }, 4147 { 4148 .name = "power well 2", 4149 .domains = TGL_PW_2_POWER_DOMAINS, 4150 .ops = &hsw_power_well_ops, 4151 .id = SKL_DISP_PW_2, 4152 { 4153 .hsw.regs = &hsw_power_well_regs, 4154 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 4155 .hsw.has_fuses = true, 4156 }, 4157 }, 4158 { 4159 .name = "power well 3", 4160 .domains = TGL_PW_3_POWER_DOMAINS, 4161 .ops = &hsw_power_well_ops, 4162 .id = ICL_DISP_PW_3, 4163 { 4164 .hsw.regs = &hsw_power_well_regs, 4165 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4166 .hsw.irq_pipe_mask = BIT(PIPE_B), 4167 .hsw.has_vga = true, 4168 .hsw.has_fuses = true, 4169 }, 4170 }, 4171 { 4172 .name = "DDI A IO", 4173 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4174 .ops = &hsw_power_well_ops, 4175 .id = DISP_PW_ID_NONE, 4176 { 4177 .hsw.regs = &icl_ddi_power_well_regs, 4178 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4179 } 4180 }, 4181 { 4182 .name = "DDI B IO", 4183 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4184 .ops = &hsw_power_well_ops, 4185 .id = DISP_PW_ID_NONE, 4186 { 4187 .hsw.regs = &icl_ddi_power_well_regs, 4188 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4189 } 4190 }, 4191 { 4192 .name = "DDI C IO", 4193 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 4194 .ops = &hsw_power_well_ops, 4195 .id = DISP_PW_ID_NONE, 4196 { 4197 .hsw.regs = &icl_ddi_power_well_regs, 4198 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 4199 } 4200 }, 4201 { 4202 .name = "DDI IO TC1", 4203 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4204 .ops = &hsw_power_well_ops, 4205 .id = DISP_PW_ID_NONE, 4206 { 4207 .hsw.regs = &icl_ddi_power_well_regs, 4208 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4209 }, 4210 }, 4211 { 4212 .name = "DDI IO TC2", 4213 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4214 .ops = &hsw_power_well_ops, 4215 .id = DISP_PW_ID_NONE, 4216 { 4217 .hsw.regs = &icl_ddi_power_well_regs, 4218 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4219 }, 4220 }, 4221 { 4222 .name = "DDI IO TC3", 4223 .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, 4224 .ops = &hsw_power_well_ops, 4225 .id = DISP_PW_ID_NONE, 4226 { 4227 .hsw.regs = &icl_ddi_power_well_regs, 4228 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 4229 }, 4230 }, 4231 { 4232 .name = "DDI IO TC4", 4233 .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, 4234 .ops = &hsw_power_well_ops, 4235 .id = DISP_PW_ID_NONE, 4236 { 4237 .hsw.regs = &icl_ddi_power_well_regs, 4238 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 4239 }, 4240 }, 4241 { 4242 .name = "DDI IO TC5", 4243 .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, 4244 .ops = &hsw_power_well_ops, 4245 .id = DISP_PW_ID_NONE, 4246 { 4247 .hsw.regs = &icl_ddi_power_well_regs, 4248 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, 4249 }, 4250 }, 4251 { 4252 .name = "DDI IO TC6", 4253 .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, 4254 .ops = &hsw_power_well_ops, 4255 .id = DISP_PW_ID_NONE, 4256 { 4257 .hsw.regs = &icl_ddi_power_well_regs, 4258 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, 4259 }, 4260 }, 4261 { 4262 .name = "TC cold off", 4263 .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, 4264 .ops = &tgl_tc_cold_off_ops, 4265 .id = TGL_DISP_PW_TC_COLD_OFF, 4266 }, 4267 { 4268 .name = "AUX A", 4269 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 4270 .ops = &icl_aux_power_well_ops, 4271 .id = DISP_PW_ID_NONE, 4272 { 4273 .hsw.regs = &icl_aux_power_well_regs, 4274 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4275 }, 4276 }, 4277 { 4278 .name = "AUX B", 4279 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 4280 .ops = &icl_aux_power_well_ops, 4281 .id = DISP_PW_ID_NONE, 4282 { 4283 .hsw.regs = &icl_aux_power_well_regs, 4284 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4285 }, 4286 }, 4287 { 4288 .name = "AUX C", 4289 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 4290 .ops = &icl_aux_power_well_ops, 4291 .id = DISP_PW_ID_NONE, 4292 { 4293 .hsw.regs = &icl_aux_power_well_regs, 4294 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 4295 }, 4296 }, 4297 { 4298 .name = "AUX USBC1", 4299 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4300 .ops = &icl_aux_power_well_ops, 4301 .id = DISP_PW_ID_NONE, 4302 { 4303 .hsw.regs = &icl_aux_power_well_regs, 4304 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4305 .hsw.is_tc_tbt = false, 4306 }, 4307 }, 4308 { 4309 .name = "AUX USBC2", 4310 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4311 .ops = &icl_aux_power_well_ops, 4312 .id = DISP_PW_ID_NONE, 4313 { 4314 .hsw.regs = &icl_aux_power_well_regs, 4315 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4316 .hsw.is_tc_tbt = false, 4317 }, 4318 }, 4319 { 4320 .name = "AUX USBC3", 4321 .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS, 4322 .ops = &icl_aux_power_well_ops, 4323 .id = DISP_PW_ID_NONE, 4324 { 4325 .hsw.regs = &icl_aux_power_well_regs, 4326 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 4327 .hsw.is_tc_tbt = false, 4328 }, 4329 }, 4330 { 4331 .name = "AUX USBC4", 4332 .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS, 4333 .ops = &icl_aux_power_well_ops, 4334 .id = DISP_PW_ID_NONE, 4335 { 4336 .hsw.regs = &icl_aux_power_well_regs, 4337 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 4338 .hsw.is_tc_tbt = false, 4339 }, 4340 }, 4341 { 4342 .name = "AUX USBC5", 4343 .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS, 4344 .ops = &icl_aux_power_well_ops, 4345 .id = DISP_PW_ID_NONE, 4346 { 4347 .hsw.regs = &icl_aux_power_well_regs, 4348 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, 4349 .hsw.is_tc_tbt = false, 4350 }, 4351 }, 4352 { 4353 .name = "AUX USBC6", 4354 .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS, 4355 .ops = &icl_aux_power_well_ops, 4356 .id = DISP_PW_ID_NONE, 4357 { 4358 .hsw.regs = &icl_aux_power_well_regs, 4359 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, 4360 .hsw.is_tc_tbt = false, 4361 }, 4362 }, 4363 { 4364 .name = "AUX TBT1", 4365 .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS, 4366 .ops = &icl_aux_power_well_ops, 4367 .id = DISP_PW_ID_NONE, 4368 { 4369 .hsw.regs = &icl_aux_power_well_regs, 4370 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 4371 .hsw.is_tc_tbt = true, 4372 }, 4373 }, 4374 { 4375 .name = "AUX TBT2", 4376 .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS, 4377 .ops = &icl_aux_power_well_ops, 4378 .id = DISP_PW_ID_NONE, 4379 { 4380 .hsw.regs = &icl_aux_power_well_regs, 4381 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 4382 .hsw.is_tc_tbt = true, 4383 }, 4384 }, 4385 { 4386 .name = "AUX TBT3", 4387 .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS, 4388 .ops = &icl_aux_power_well_ops, 4389 .id = DISP_PW_ID_NONE, 4390 { 4391 .hsw.regs = &icl_aux_power_well_regs, 4392 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 4393 .hsw.is_tc_tbt = true, 4394 }, 4395 }, 4396 { 4397 .name = "AUX TBT4", 4398 .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS, 4399 .ops = &icl_aux_power_well_ops, 4400 .id = DISP_PW_ID_NONE, 4401 { 4402 .hsw.regs = &icl_aux_power_well_regs, 4403 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 4404 .hsw.is_tc_tbt = true, 4405 }, 4406 }, 4407 { 4408 .name = "AUX TBT5", 4409 .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS, 4410 .ops = &icl_aux_power_well_ops, 4411 .id = DISP_PW_ID_NONE, 4412 { 4413 .hsw.regs = &icl_aux_power_well_regs, 4414 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, 4415 .hsw.is_tc_tbt = true, 4416 }, 4417 }, 4418 { 4419 .name = "AUX TBT6", 4420 .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS, 4421 .ops = &icl_aux_power_well_ops, 4422 .id = DISP_PW_ID_NONE, 4423 { 4424 .hsw.regs = &icl_aux_power_well_regs, 4425 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, 4426 .hsw.is_tc_tbt = true, 4427 }, 4428 }, 4429 { 4430 .name = "power well 4", 4431 .domains = TGL_PW_4_POWER_DOMAINS, 4432 .ops = &hsw_power_well_ops, 4433 .id = DISP_PW_ID_NONE, 4434 { 4435 .hsw.regs = &hsw_power_well_regs, 4436 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4437 .hsw.has_fuses = true, 4438 .hsw.irq_pipe_mask = BIT(PIPE_C), 4439 } 4440 }, 4441 { 4442 .name = "power well 5", 4443 .domains = TGL_PW_5_POWER_DOMAINS, 4444 .ops = &hsw_power_well_ops, 4445 .id = DISP_PW_ID_NONE, 4446 { 4447 .hsw.regs = &hsw_power_well_regs, 4448 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4449 .hsw.has_fuses = true, 4450 .hsw.irq_pipe_mask = BIT(PIPE_D), 4451 }, 4452 }, 4453 }; 4454 4455 static const struct i915_power_well_desc rkl_power_wells[] = { 4456 { 4457 .name = "always-on", 4458 .always_on = true, 4459 .domains = POWER_DOMAIN_MASK, 4460 .ops = &i9xx_always_on_power_well_ops, 4461 .id = DISP_PW_ID_NONE, 4462 }, 4463 { 4464 .name = "power well 1", 4465 /* Handled by the DMC firmware */ 4466 .always_on = true, 4467 .domains = 0, 4468 .ops = &hsw_power_well_ops, 4469 .id = SKL_DISP_PW_1, 4470 { 4471 .hsw.regs = &hsw_power_well_regs, 4472 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4473 .hsw.has_fuses = true, 4474 }, 4475 }, 4476 { 4477 .name = "DC off", 4478 .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS, 4479 .ops = &gen9_dc_off_power_well_ops, 4480 .id = SKL_DISP_DC_OFF, 4481 }, 4482 { 4483 .name = "power well 3", 4484 .domains = RKL_PW_3_POWER_DOMAINS, 4485 .ops = &hsw_power_well_ops, 4486 .id = ICL_DISP_PW_3, 4487 { 4488 .hsw.regs = &hsw_power_well_regs, 4489 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4490 .hsw.irq_pipe_mask = BIT(PIPE_B), 4491 .hsw.has_vga = true, 4492 .hsw.has_fuses = true, 4493 }, 4494 }, 4495 { 4496 .name = "power well 4", 4497 .domains = RKL_PW_4_POWER_DOMAINS, 4498 .ops = &hsw_power_well_ops, 4499 .id = DISP_PW_ID_NONE, 4500 { 4501 .hsw.regs = &hsw_power_well_regs, 4502 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4503 .hsw.has_fuses = true, 4504 .hsw.irq_pipe_mask = BIT(PIPE_C), 4505 } 4506 }, 4507 { 4508 .name = "DDI A IO", 4509 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4510 .ops = &hsw_power_well_ops, 4511 .id = DISP_PW_ID_NONE, 4512 { 4513 .hsw.regs = &icl_ddi_power_well_regs, 4514 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4515 } 4516 }, 4517 { 4518 .name = "DDI B IO", 4519 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4520 .ops = &hsw_power_well_ops, 4521 .id = DISP_PW_ID_NONE, 4522 { 4523 .hsw.regs = &icl_ddi_power_well_regs, 4524 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4525 } 4526 }, 4527 { 4528 .name = "DDI IO TC1", 4529 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4530 .ops = &hsw_power_well_ops, 4531 .id = DISP_PW_ID_NONE, 4532 { 4533 .hsw.regs = &icl_ddi_power_well_regs, 4534 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4535 }, 4536 }, 4537 { 4538 .name = "DDI IO TC2", 4539 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4540 .ops = &hsw_power_well_ops, 4541 .id = DISP_PW_ID_NONE, 4542 { 4543 .hsw.regs = &icl_ddi_power_well_regs, 4544 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4545 }, 4546 }, 4547 { 4548 .name = "AUX A", 4549 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 4550 .ops = &icl_aux_power_well_ops, 4551 .id = DISP_PW_ID_NONE, 4552 { 4553 .hsw.regs = &icl_aux_power_well_regs, 4554 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4555 }, 4556 }, 4557 { 4558 .name = "AUX B", 4559 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 4560 .ops = &icl_aux_power_well_ops, 4561 .id = DISP_PW_ID_NONE, 4562 { 4563 .hsw.regs = &icl_aux_power_well_regs, 4564 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4565 }, 4566 }, 4567 { 4568 .name = "AUX USBC1", 4569 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4570 .ops = &icl_aux_power_well_ops, 4571 .id = DISP_PW_ID_NONE, 4572 { 4573 .hsw.regs = &icl_aux_power_well_regs, 4574 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4575 }, 4576 }, 4577 { 4578 .name = "AUX USBC2", 4579 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4580 .ops = &icl_aux_power_well_ops, 4581 .id = DISP_PW_ID_NONE, 4582 { 4583 .hsw.regs = &icl_aux_power_well_regs, 4584 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4585 }, 4586 }, 4587 }; 4588 4589 static const struct i915_power_well_desc dg1_power_wells[] = { 4590 { 4591 .name = "always-on", 4592 .always_on = true, 4593 .domains = POWER_DOMAIN_MASK, 4594 .ops = &i9xx_always_on_power_well_ops, 4595 .id = DISP_PW_ID_NONE, 4596 }, 4597 { 4598 .name = "power well 1", 4599 /* Handled by the DMC firmware */ 4600 .always_on = true, 4601 .domains = 0, 4602 .ops = &hsw_power_well_ops, 4603 .id = SKL_DISP_PW_1, 4604 { 4605 .hsw.regs = &hsw_power_well_regs, 4606 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4607 .hsw.has_fuses = true, 4608 }, 4609 }, 4610 { 4611 .name = "DC off", 4612 .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS, 4613 .ops = &gen9_dc_off_power_well_ops, 4614 .id = SKL_DISP_DC_OFF, 4615 }, 4616 { 4617 .name = "power well 2", 4618 .domains = DG1_PW_2_POWER_DOMAINS, 4619 .ops = &hsw_power_well_ops, 4620 .id = SKL_DISP_PW_2, 4621 { 4622 .hsw.regs = &hsw_power_well_regs, 4623 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 4624 .hsw.has_fuses = true, 4625 }, 4626 }, 4627 { 4628 .name = "power well 3", 4629 .domains = DG1_PW_3_POWER_DOMAINS, 4630 .ops = &hsw_power_well_ops, 4631 .id = ICL_DISP_PW_3, 4632 { 4633 .hsw.regs = &hsw_power_well_regs, 4634 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4635 .hsw.irq_pipe_mask = BIT(PIPE_B), 4636 .hsw.has_vga = true, 4637 .hsw.has_fuses = true, 4638 }, 4639 }, 4640 { 4641 .name = "DDI A IO", 4642 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4643 .ops = &hsw_power_well_ops, 4644 .id = DISP_PW_ID_NONE, 4645 { 4646 .hsw.regs = &icl_ddi_power_well_regs, 4647 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4648 } 4649 }, 4650 { 4651 .name = "DDI B IO", 4652 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4653 .ops = &hsw_power_well_ops, 4654 .id = DISP_PW_ID_NONE, 4655 { 4656 .hsw.regs = &icl_ddi_power_well_regs, 4657 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4658 } 4659 }, 4660 { 4661 .name = "DDI IO TC1", 4662 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4663 .ops = &hsw_power_well_ops, 4664 .id = DISP_PW_ID_NONE, 4665 { 4666 .hsw.regs = &icl_ddi_power_well_regs, 4667 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4668 }, 4669 }, 4670 { 4671 .name = "DDI IO TC2", 4672 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4673 .ops = &hsw_power_well_ops, 4674 .id = DISP_PW_ID_NONE, 4675 { 4676 .hsw.regs = &icl_ddi_power_well_regs, 4677 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4678 }, 4679 }, 4680 { 4681 .name = "AUX A", 4682 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 4683 .ops = &icl_aux_power_well_ops, 4684 .id = DISP_PW_ID_NONE, 4685 { 4686 .hsw.regs = &icl_aux_power_well_regs, 4687 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4688 }, 4689 }, 4690 { 4691 .name = "AUX B", 4692 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 4693 .ops = &icl_aux_power_well_ops, 4694 .id = DISP_PW_ID_NONE, 4695 { 4696 .hsw.regs = &icl_aux_power_well_regs, 4697 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4698 }, 4699 }, 4700 { 4701 .name = "AUX USBC1", 4702 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4703 .ops = &icl_aux_power_well_ops, 4704 .id = DISP_PW_ID_NONE, 4705 { 4706 .hsw.regs = &icl_aux_power_well_regs, 4707 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4708 .hsw.is_tc_tbt = false, 4709 }, 4710 }, 4711 { 4712 .name = "AUX USBC2", 4713 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4714 .ops = &icl_aux_power_well_ops, 4715 .id = DISP_PW_ID_NONE, 4716 { 4717 .hsw.regs = &icl_aux_power_well_regs, 4718 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4719 .hsw.is_tc_tbt = false, 4720 }, 4721 }, 4722 { 4723 .name = "power well 4", 4724 .domains = TGL_PW_4_POWER_DOMAINS, 4725 .ops = &hsw_power_well_ops, 4726 .id = DISP_PW_ID_NONE, 4727 { 4728 .hsw.regs = &hsw_power_well_regs, 4729 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4730 .hsw.has_fuses = true, 4731 .hsw.irq_pipe_mask = BIT(PIPE_C), 4732 } 4733 }, 4734 { 4735 .name = "power well 5", 4736 .domains = TGL_PW_5_POWER_DOMAINS, 4737 .ops = &hsw_power_well_ops, 4738 .id = DISP_PW_ID_NONE, 4739 { 4740 .hsw.regs = &hsw_power_well_regs, 4741 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4742 .hsw.has_fuses = true, 4743 .hsw.irq_pipe_mask = BIT(PIPE_D), 4744 }, 4745 }, 4746 }; 4747 4748 static const struct i915_power_well_desc xelpd_power_wells[] = { 4749 { 4750 .name = "always-on", 4751 .always_on = true, 4752 .domains = POWER_DOMAIN_MASK, 4753 .ops = &i9xx_always_on_power_well_ops, 4754 .id = DISP_PW_ID_NONE, 4755 }, 4756 { 4757 .name = "power well 1", 4758 /* Handled by the DMC firmware */ 4759 .always_on = true, 4760 .domains = 0, 4761 .ops = &hsw_power_well_ops, 4762 .id = SKL_DISP_PW_1, 4763 { 4764 .hsw.regs = &hsw_power_well_regs, 4765 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4766 .hsw.has_fuses = true, 4767 }, 4768 }, 4769 { 4770 .name = "DC off", 4771 .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS, 4772 .ops = &gen9_dc_off_power_well_ops, 4773 .id = SKL_DISP_DC_OFF, 4774 }, 4775 { 4776 .name = "power well 2", 4777 .domains = XELPD_PW_2_POWER_DOMAINS, 4778 .ops = &hsw_power_well_ops, 4779 .id = SKL_DISP_PW_2, 4780 { 4781 .hsw.regs = &hsw_power_well_regs, 4782 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 4783 .hsw.has_vga = true, 4784 .hsw.has_fuses = true, 4785 }, 4786 }, 4787 { 4788 .name = "power well A", 4789 .domains = XELPD_PW_A_POWER_DOMAINS, 4790 .ops = &hsw_power_well_ops, 4791 .id = DISP_PW_ID_NONE, 4792 { 4793 .hsw.regs = &hsw_power_well_regs, 4794 .hsw.idx = XELPD_PW_CTL_IDX_PW_A, 4795 .hsw.irq_pipe_mask = BIT(PIPE_A), 4796 .hsw.has_fuses = true, 4797 }, 4798 }, 4799 { 4800 .name = "power well B", 4801 .domains = XELPD_PW_B_POWER_DOMAINS, 4802 .ops = &hsw_power_well_ops, 4803 .id = DISP_PW_ID_NONE, 4804 { 4805 .hsw.regs = &hsw_power_well_regs, 4806 .hsw.idx = XELPD_PW_CTL_IDX_PW_B, 4807 .hsw.irq_pipe_mask = BIT(PIPE_B), 4808 .hsw.has_fuses = true, 4809 }, 4810 }, 4811 { 4812 .name = "power well C", 4813 .domains = XELPD_PW_C_POWER_DOMAINS, 4814 .ops = &hsw_power_well_ops, 4815 .id = DISP_PW_ID_NONE, 4816 { 4817 .hsw.regs = &hsw_power_well_regs, 4818 .hsw.idx = XELPD_PW_CTL_IDX_PW_C, 4819 .hsw.irq_pipe_mask = BIT(PIPE_C), 4820 .hsw.has_fuses = true, 4821 }, 4822 }, 4823 { 4824 .name = "power well D", 4825 .domains = XELPD_PW_D_POWER_DOMAINS, 4826 .ops = &hsw_power_well_ops, 4827 .id = DISP_PW_ID_NONE, 4828 { 4829 .hsw.regs = &hsw_power_well_regs, 4830 .hsw.idx = XELPD_PW_CTL_IDX_PW_D, 4831 .hsw.irq_pipe_mask = BIT(PIPE_D), 4832 .hsw.has_fuses = true, 4833 }, 4834 }, 4835 { 4836 .name = "DDI A IO", 4837 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4838 .ops = &hsw_power_well_ops, 4839 .id = DISP_PW_ID_NONE, 4840 { 4841 .hsw.regs = &icl_ddi_power_well_regs, 4842 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4843 } 4844 }, 4845 { 4846 .name = "DDI B IO", 4847 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4848 .ops = &hsw_power_well_ops, 4849 .id = DISP_PW_ID_NONE, 4850 { 4851 .hsw.regs = &icl_ddi_power_well_regs, 4852 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4853 } 4854 }, 4855 { 4856 .name = "DDI C IO", 4857 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 4858 .ops = &hsw_power_well_ops, 4859 .id = DISP_PW_ID_NONE, 4860 { 4861 .hsw.regs = &icl_ddi_power_well_regs, 4862 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 4863 } 4864 }, 4865 { 4866 .name = "DDI IO D_XELPD", 4867 .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS, 4868 .ops = &hsw_power_well_ops, 4869 .id = DISP_PW_ID_NONE, 4870 { 4871 .hsw.regs = &icl_ddi_power_well_regs, 4872 .hsw.idx = XELPD_PW_CTL_IDX_DDI_D, 4873 } 4874 }, 4875 { 4876 .name = "DDI IO E_XELPD", 4877 .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS, 4878 .ops = &hsw_power_well_ops, 4879 .id = DISP_PW_ID_NONE, 4880 { 4881 .hsw.regs = &icl_ddi_power_well_regs, 4882 .hsw.idx = XELPD_PW_CTL_IDX_DDI_E, 4883 } 4884 }, 4885 { 4886 .name = "DDI IO TC1", 4887 .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS, 4888 .ops = &hsw_power_well_ops, 4889 .id = DISP_PW_ID_NONE, 4890 { 4891 .hsw.regs = &icl_ddi_power_well_regs, 4892 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4893 } 4894 }, 4895 { 4896 .name = "DDI IO TC2", 4897 .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS, 4898 .ops = &hsw_power_well_ops, 4899 .id = DISP_PW_ID_NONE, 4900 { 4901 .hsw.regs = &icl_ddi_power_well_regs, 4902 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4903 } 4904 }, 4905 { 4906 .name = "DDI IO TC3", 4907 .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS, 4908 .ops = &hsw_power_well_ops, 4909 .id = DISP_PW_ID_NONE, 4910 { 4911 .hsw.regs = &icl_ddi_power_well_regs, 4912 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 4913 } 4914 }, 4915 { 4916 .name = "DDI IO TC4", 4917 .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS, 4918 .ops = &hsw_power_well_ops, 4919 .id = DISP_PW_ID_NONE, 4920 { 4921 .hsw.regs = &icl_ddi_power_well_regs, 4922 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 4923 } 4924 }, 4925 { 4926 .name = "AUX A", 4927 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 4928 .ops = &icl_aux_power_well_ops, 4929 .id = DISP_PW_ID_NONE, 4930 { 4931 .hsw.regs = &icl_aux_power_well_regs, 4932 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4933 .hsw.fixed_enable_delay = 600, 4934 }, 4935 }, 4936 { 4937 .name = "AUX B", 4938 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 4939 .ops = &icl_aux_power_well_ops, 4940 .id = DISP_PW_ID_NONE, 4941 { 4942 .hsw.regs = &icl_aux_power_well_regs, 4943 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4944 .hsw.fixed_enable_delay = 600, 4945 }, 4946 }, 4947 { 4948 .name = "AUX C", 4949 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 4950 .ops = &icl_aux_power_well_ops, 4951 .id = DISP_PW_ID_NONE, 4952 { 4953 .hsw.regs = &icl_aux_power_well_regs, 4954 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 4955 .hsw.fixed_enable_delay = 600, 4956 }, 4957 }, 4958 { 4959 .name = "AUX D_XELPD", 4960 .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS, 4961 .ops = &icl_aux_power_well_ops, 4962 .id = DISP_PW_ID_NONE, 4963 { 4964 .hsw.regs = &icl_aux_power_well_regs, 4965 .hsw.idx = XELPD_PW_CTL_IDX_AUX_D, 4966 .hsw.fixed_enable_delay = 600, 4967 }, 4968 }, 4969 { 4970 .name = "AUX E_XELPD", 4971 .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS, 4972 .ops = &icl_aux_power_well_ops, 4973 .id = DISP_PW_ID_NONE, 4974 { 4975 .hsw.regs = &icl_aux_power_well_regs, 4976 .hsw.idx = XELPD_PW_CTL_IDX_AUX_E, 4977 }, 4978 }, 4979 { 4980 .name = "AUX USBC1", 4981 .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS, 4982 .ops = &icl_aux_power_well_ops, 4983 .id = DISP_PW_ID_NONE, 4984 { 4985 .hsw.regs = &icl_aux_power_well_regs, 4986 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4987 .hsw.fixed_enable_delay = 600, 4988 }, 4989 }, 4990 { 4991 .name = "AUX USBC2", 4992 .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS, 4993 .ops = &icl_aux_power_well_ops, 4994 .id = DISP_PW_ID_NONE, 4995 { 4996 .hsw.regs = &icl_aux_power_well_regs, 4997 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4998 }, 4999 }, 5000 { 5001 .name = "AUX USBC3", 5002 .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS, 5003 .ops = &icl_aux_power_well_ops, 5004 .id = DISP_PW_ID_NONE, 5005 { 5006 .hsw.regs = &icl_aux_power_well_regs, 5007 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 5008 }, 5009 }, 5010 { 5011 .name = "AUX USBC4", 5012 .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS, 5013 .ops = &icl_aux_power_well_ops, 5014 .id = DISP_PW_ID_NONE, 5015 { 5016 .hsw.regs = &icl_aux_power_well_regs, 5017 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 5018 }, 5019 }, 5020 { 5021 .name = "AUX TBT1", 5022 .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS, 5023 .ops = &icl_aux_power_well_ops, 5024 .id = DISP_PW_ID_NONE, 5025 { 5026 .hsw.regs = &icl_aux_power_well_regs, 5027 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 5028 .hsw.is_tc_tbt = true, 5029 }, 5030 }, 5031 { 5032 .name = "AUX TBT2", 5033 .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS, 5034 .ops = &icl_aux_power_well_ops, 5035 .id = DISP_PW_ID_NONE, 5036 { 5037 .hsw.regs = &icl_aux_power_well_regs, 5038 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 5039 .hsw.is_tc_tbt = true, 5040 }, 5041 }, 5042 { 5043 .name = "AUX TBT3", 5044 .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS, 5045 .ops = &icl_aux_power_well_ops, 5046 .id = DISP_PW_ID_NONE, 5047 { 5048 .hsw.regs = &icl_aux_power_well_regs, 5049 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 5050 .hsw.is_tc_tbt = true, 5051 }, 5052 }, 5053 { 5054 .name = "AUX TBT4", 5055 .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS, 5056 .ops = &icl_aux_power_well_ops, 5057 .id = DISP_PW_ID_NONE, 5058 { 5059 .hsw.regs = &icl_aux_power_well_regs, 5060 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 5061 .hsw.is_tc_tbt = true, 5062 }, 5063 }, 5064 }; 5065 5066 static int 5067 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 5068 int disable_power_well) 5069 { 5070 if (disable_power_well >= 0) 5071 return !!disable_power_well; 5072 5073 return 1; 5074 } 5075 5076 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, 5077 int enable_dc) 5078 { 5079 u32 mask; 5080 int requested_dc; 5081 int max_dc; 5082 5083 if (!HAS_DISPLAY(dev_priv)) 5084 return 0; 5085 5086 if (IS_DG1(dev_priv)) 5087 max_dc = 3; 5088 else if (DISPLAY_VER(dev_priv) >= 12) 5089 max_dc = 4; 5090 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 5091 max_dc = 1; 5092 else if (DISPLAY_VER(dev_priv) >= 9) 5093 max_dc = 2; 5094 else 5095 max_dc = 0; 5096 5097 /* 5098 * DC9 has a separate HW flow from the rest of the DC states, 5099 * not depending on the DMC firmware. It's needed by system 5100 * suspend/resume, so allow it unconditionally. 5101 */ 5102 mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || 5103 DISPLAY_VER(dev_priv) >= 11 ? 5104 DC_STATE_EN_DC9 : 0; 5105 5106 if (!dev_priv->params.disable_power_well) 5107 max_dc = 0; 5108 5109 if (enable_dc >= 0 && enable_dc <= max_dc) { 5110 requested_dc = enable_dc; 5111 } else if (enable_dc == -1) { 5112 requested_dc = max_dc; 5113 } else if (enable_dc > max_dc && enable_dc <= 4) { 5114 drm_dbg_kms(&dev_priv->drm, 5115 "Adjusting requested max DC state (%d->%d)\n", 5116 enable_dc, max_dc); 5117 requested_dc = max_dc; 5118 } else { 5119 drm_err(&dev_priv->drm, 5120 "Unexpected value for enable_dc (%d)\n", enable_dc); 5121 requested_dc = max_dc; 5122 } 5123 5124 switch (requested_dc) { 5125 case 4: 5126 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; 5127 break; 5128 case 3: 5129 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; 5130 break; 5131 case 2: 5132 mask |= DC_STATE_EN_UPTO_DC6; 5133 break; 5134 case 1: 5135 mask |= DC_STATE_EN_UPTO_DC5; 5136 break; 5137 } 5138 5139 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask); 5140 5141 return mask; 5142 } 5143 5144 static int 5145 __set_power_wells(struct i915_power_domains *power_domains, 5146 const struct i915_power_well_desc *power_well_descs, 5147 int power_well_descs_sz, u64 skip_mask) 5148 { 5149 struct drm_i915_private *i915 = container_of(power_domains, 5150 struct drm_i915_private, 5151 power_domains); 5152 u64 power_well_ids = 0; 5153 int power_well_count = 0; 5154 int i, plt_idx = 0; 5155 5156 for (i = 0; i < power_well_descs_sz; i++) 5157 if (!(BIT_ULL(power_well_descs[i].id) & skip_mask)) 5158 power_well_count++; 5159 5160 power_domains->power_well_count = power_well_count; 5161 power_domains->power_wells = 5162 kcalloc(power_well_count, 5163 sizeof(*power_domains->power_wells), 5164 GFP_KERNEL); 5165 if (!power_domains->power_wells) 5166 return -ENOMEM; 5167 5168 for (i = 0; i < power_well_descs_sz; i++) { 5169 enum i915_power_well_id id = power_well_descs[i].id; 5170 5171 if (BIT_ULL(id) & skip_mask) 5172 continue; 5173 5174 power_domains->power_wells[plt_idx++].desc = 5175 &power_well_descs[i]; 5176 5177 if (id == DISP_PW_ID_NONE) 5178 continue; 5179 5180 drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8); 5181 drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id)); 5182 power_well_ids |= BIT_ULL(id); 5183 } 5184 5185 return 0; 5186 } 5187 5188 #define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \ 5189 __set_power_wells(power_domains, __power_well_descs, \ 5190 ARRAY_SIZE(__power_well_descs), skip_mask) 5191 5192 #define set_power_wells(power_domains, __power_well_descs) \ 5193 set_power_wells_mask(power_domains, __power_well_descs, 0) 5194 5195 /** 5196 * intel_power_domains_init - initializes the power domain structures 5197 * @dev_priv: i915 device instance 5198 * 5199 * Initializes the power domain structures for @dev_priv depending upon the 5200 * supported platform. 5201 */ 5202 int intel_power_domains_init(struct drm_i915_private *dev_priv) 5203 { 5204 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5205 int err; 5206 5207 dev_priv->params.disable_power_well = 5208 sanitize_disable_power_well_option(dev_priv, 5209 dev_priv->params.disable_power_well); 5210 dev_priv->dmc.allowed_dc_mask = 5211 get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); 5212 5213 dev_priv->dmc.target_dc_state = 5214 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 5215 5216 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); 5217 5218 mutex_init(&power_domains->lock); 5219 5220 INIT_DELAYED_WORK(&power_domains->async_put_work, 5221 intel_display_power_put_async_work); 5222 5223 /* 5224 * The enabling order will be from lower to higher indexed wells, 5225 * the disabling order is reversed. 5226 */ 5227 if (!HAS_DISPLAY(dev_priv)) { 5228 power_domains->power_well_count = 0; 5229 err = 0; 5230 } else if (DISPLAY_VER(dev_priv) >= 13) { 5231 err = set_power_wells(power_domains, xelpd_power_wells); 5232 } else if (IS_DG1(dev_priv)) { 5233 err = set_power_wells(power_domains, dg1_power_wells); 5234 } else if (IS_ALDERLAKE_S(dev_priv)) { 5235 err = set_power_wells_mask(power_domains, tgl_power_wells, 5236 BIT_ULL(TGL_DISP_PW_TC_COLD_OFF)); 5237 } else if (IS_ROCKETLAKE(dev_priv)) { 5238 err = set_power_wells(power_domains, rkl_power_wells); 5239 } else if (DISPLAY_VER(dev_priv) == 12) { 5240 err = set_power_wells(power_domains, tgl_power_wells); 5241 } else if (DISPLAY_VER(dev_priv) == 11) { 5242 err = set_power_wells(power_domains, icl_power_wells); 5243 } else if (IS_GEMINILAKE(dev_priv)) { 5244 err = set_power_wells(power_domains, glk_power_wells); 5245 } else if (IS_BROXTON(dev_priv)) { 5246 err = set_power_wells(power_domains, bxt_power_wells); 5247 } else if (DISPLAY_VER(dev_priv) == 9) { 5248 err = set_power_wells(power_domains, skl_power_wells); 5249 } else if (IS_CHERRYVIEW(dev_priv)) { 5250 err = set_power_wells(power_domains, chv_power_wells); 5251 } else if (IS_BROADWELL(dev_priv)) { 5252 err = set_power_wells(power_domains, bdw_power_wells); 5253 } else if (IS_HASWELL(dev_priv)) { 5254 err = set_power_wells(power_domains, hsw_power_wells); 5255 } else if (IS_VALLEYVIEW(dev_priv)) { 5256 err = set_power_wells(power_domains, vlv_power_wells); 5257 } else if (IS_I830(dev_priv)) { 5258 err = set_power_wells(power_domains, i830_power_wells); 5259 } else { 5260 err = set_power_wells(power_domains, i9xx_always_on_power_well); 5261 } 5262 5263 return err; 5264 } 5265 5266 /** 5267 * intel_power_domains_cleanup - clean up power domains resources 5268 * @dev_priv: i915 device instance 5269 * 5270 * Release any resources acquired by intel_power_domains_init() 5271 */ 5272 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) 5273 { 5274 kfree(dev_priv->power_domains.power_wells); 5275 } 5276 5277 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) 5278 { 5279 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5280 struct i915_power_well *power_well; 5281 5282 mutex_lock(&power_domains->lock); 5283 for_each_power_well(dev_priv, power_well) { 5284 power_well->desc->ops->sync_hw(dev_priv, power_well); 5285 power_well->hw_enabled = 5286 power_well->desc->ops->is_enabled(dev_priv, power_well); 5287 } 5288 mutex_unlock(&power_domains->lock); 5289 } 5290 5291 static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv, 5292 enum dbuf_slice slice, bool enable) 5293 { 5294 i915_reg_t reg = DBUF_CTL_S(slice); 5295 bool state; 5296 5297 intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST, 5298 enable ? DBUF_POWER_REQUEST : 0); 5299 intel_de_posting_read(dev_priv, reg); 5300 udelay(10); 5301 5302 state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE; 5303 drm_WARN(&dev_priv->drm, enable != state, 5304 "DBuf slice %d power %s timeout!\n", 5305 slice, enabledisable(enable)); 5306 } 5307 5308 void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, 5309 u8 req_slices) 5310 { 5311 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5312 u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask; 5313 enum dbuf_slice slice; 5314 5315 drm_WARN(&dev_priv->drm, req_slices & ~slice_mask, 5316 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n", 5317 req_slices, slice_mask); 5318 5319 drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n", 5320 req_slices); 5321 5322 /* 5323 * Might be running this in parallel to gen9_dc_off_power_well_enable 5324 * being called from intel_dp_detect for instance, 5325 * which causes assertion triggered by race condition, 5326 * as gen9_assert_dbuf_enabled might preempt this when registers 5327 * were already updated, while dev_priv was not. 5328 */ 5329 mutex_lock(&power_domains->lock); 5330 5331 for_each_dbuf_slice(dev_priv, slice) 5332 gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice)); 5333 5334 dev_priv->dbuf.enabled_slices = req_slices; 5335 5336 mutex_unlock(&power_domains->lock); 5337 } 5338 5339 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) 5340 { 5341 dev_priv->dbuf.enabled_slices = 5342 intel_enabled_dbuf_slices_mask(dev_priv); 5343 5344 /* 5345 * Just power up at least 1 slice, we will 5346 * figure out later which slices we have and what we need. 5347 */ 5348 gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | 5349 dev_priv->dbuf.enabled_slices); 5350 } 5351 5352 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) 5353 { 5354 gen9_dbuf_slices_update(dev_priv, 0); 5355 } 5356 5357 static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) 5358 { 5359 enum dbuf_slice slice; 5360 5361 if (IS_ALDERLAKE_P(dev_priv)) 5362 return; 5363 5364 for_each_dbuf_slice(dev_priv, slice) 5365 intel_de_rmw(dev_priv, DBUF_CTL_S(slice), 5366 DBUF_TRACKER_STATE_SERVICE_MASK, 5367 DBUF_TRACKER_STATE_SERVICE(8)); 5368 } 5369 5370 static void icl_mbus_init(struct drm_i915_private *dev_priv) 5371 { 5372 unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask; 5373 u32 mask, val, i; 5374 5375 if (IS_ALDERLAKE_P(dev_priv)) 5376 return; 5377 5378 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | 5379 MBUS_ABOX_BT_CREDIT_POOL2_MASK | 5380 MBUS_ABOX_B_CREDIT_MASK | 5381 MBUS_ABOX_BW_CREDIT_MASK; 5382 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 5383 MBUS_ABOX_BT_CREDIT_POOL2(16) | 5384 MBUS_ABOX_B_CREDIT(1) | 5385 MBUS_ABOX_BW_CREDIT(1); 5386 5387 /* 5388 * gen12 platforms that use abox1 and abox2 for pixel data reads still 5389 * expect us to program the abox_ctl0 register as well, even though 5390 * we don't have to program other instance-0 registers like BW_BUDDY. 5391 */ 5392 if (DISPLAY_VER(dev_priv) == 12) 5393 abox_regs |= BIT(0); 5394 5395 for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) 5396 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val); 5397 } 5398 5399 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 5400 { 5401 u32 val = intel_de_read(dev_priv, LCPLL_CTL); 5402 5403 /* 5404 * The LCPLL register should be turned on by the BIOS. For now 5405 * let's just check its state and print errors in case 5406 * something is wrong. Don't even try to turn it on. 5407 */ 5408 5409 if (val & LCPLL_CD_SOURCE_FCLK) 5410 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); 5411 5412 if (val & LCPLL_PLL_DISABLE) 5413 drm_err(&dev_priv->drm, "LCPLL is disabled\n"); 5414 5415 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 5416 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); 5417 } 5418 5419 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 5420 { 5421 struct drm_device *dev = &dev_priv->drm; 5422 struct intel_crtc *crtc; 5423 5424 for_each_intel_crtc(dev, crtc) 5425 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 5426 pipe_name(crtc->pipe)); 5427 5428 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), 5429 "Display power well on\n"); 5430 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, 5431 "SPLL enabled\n"); 5432 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 5433 "WRPLL1 enabled\n"); 5434 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 5435 "WRPLL2 enabled\n"); 5436 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, 5437 "Panel power on\n"); 5438 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 5439 "CPU PWM1 enabled\n"); 5440 if (IS_HASWELL(dev_priv)) 5441 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 5442 "CPU PWM2 enabled\n"); 5443 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 5444 "PCH PWM1 enabled\n"); 5445 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 5446 "Utility pin enabled\n"); 5447 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, 5448 "PCH GTC enabled\n"); 5449 5450 /* 5451 * In theory we can still leave IRQs enabled, as long as only the HPD 5452 * interrupts remain enabled. We used to check for that, but since it's 5453 * gen-specific and since we only disable LCPLL after we fully disable 5454 * the interrupts, the check below should be enough. 5455 */ 5456 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 5457 } 5458 5459 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 5460 { 5461 if (IS_HASWELL(dev_priv)) 5462 return intel_de_read(dev_priv, D_COMP_HSW); 5463 else 5464 return intel_de_read(dev_priv, D_COMP_BDW); 5465 } 5466 5467 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 5468 { 5469 if (IS_HASWELL(dev_priv)) { 5470 if (snb_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) 5471 drm_dbg_kms(&dev_priv->drm, 5472 "Failed to write to D_COMP\n"); 5473 } else { 5474 intel_de_write(dev_priv, D_COMP_BDW, val); 5475 intel_de_posting_read(dev_priv, D_COMP_BDW); 5476 } 5477 } 5478 5479 /* 5480 * This function implements pieces of two sequences from BSpec: 5481 * - Sequence for display software to disable LCPLL 5482 * - Sequence for display software to allow package C8+ 5483 * The steps implemented here are just the steps that actually touch the LCPLL 5484 * register. Callers should take care of disabling all the display engine 5485 * functions, doing the mode unset, fixing interrupts, etc. 5486 */ 5487 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 5488 bool switch_to_fclk, bool allow_power_down) 5489 { 5490 u32 val; 5491 5492 assert_can_disable_lcpll(dev_priv); 5493 5494 val = intel_de_read(dev_priv, LCPLL_CTL); 5495 5496 if (switch_to_fclk) { 5497 val |= LCPLL_CD_SOURCE_FCLK; 5498 intel_de_write(dev_priv, LCPLL_CTL, val); 5499 5500 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & 5501 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 5502 drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); 5503 5504 val = intel_de_read(dev_priv, LCPLL_CTL); 5505 } 5506 5507 val |= LCPLL_PLL_DISABLE; 5508 intel_de_write(dev_priv, LCPLL_CTL, val); 5509 intel_de_posting_read(dev_priv, LCPLL_CTL); 5510 5511 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 5512 drm_err(&dev_priv->drm, "LCPLL still locked\n"); 5513 5514 val = hsw_read_dcomp(dev_priv); 5515 val |= D_COMP_COMP_DISABLE; 5516 hsw_write_dcomp(dev_priv, val); 5517 ndelay(100); 5518 5519 if (wait_for((hsw_read_dcomp(dev_priv) & 5520 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 5521 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); 5522 5523 if (allow_power_down) { 5524 val = intel_de_read(dev_priv, LCPLL_CTL); 5525 val |= LCPLL_POWER_DOWN_ALLOW; 5526 intel_de_write(dev_priv, LCPLL_CTL, val); 5527 intel_de_posting_read(dev_priv, LCPLL_CTL); 5528 } 5529 } 5530 5531 /* 5532 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 5533 * source. 5534 */ 5535 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 5536 { 5537 u32 val; 5538 5539 val = intel_de_read(dev_priv, LCPLL_CTL); 5540 5541 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 5542 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 5543 return; 5544 5545 /* 5546 * Make sure we're not on PC8 state before disabling PC8, otherwise 5547 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 5548 */ 5549 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 5550 5551 if (val & LCPLL_POWER_DOWN_ALLOW) { 5552 val &= ~LCPLL_POWER_DOWN_ALLOW; 5553 intel_de_write(dev_priv, LCPLL_CTL, val); 5554 intel_de_posting_read(dev_priv, LCPLL_CTL); 5555 } 5556 5557 val = hsw_read_dcomp(dev_priv); 5558 val |= D_COMP_COMP_FORCE; 5559 val &= ~D_COMP_COMP_DISABLE; 5560 hsw_write_dcomp(dev_priv, val); 5561 5562 val = intel_de_read(dev_priv, LCPLL_CTL); 5563 val &= ~LCPLL_PLL_DISABLE; 5564 intel_de_write(dev_priv, LCPLL_CTL, val); 5565 5566 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 5567 drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); 5568 5569 if (val & LCPLL_CD_SOURCE_FCLK) { 5570 val = intel_de_read(dev_priv, LCPLL_CTL); 5571 val &= ~LCPLL_CD_SOURCE_FCLK; 5572 intel_de_write(dev_priv, LCPLL_CTL, val); 5573 5574 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & 5575 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 5576 drm_err(&dev_priv->drm, 5577 "Switching back to LCPLL failed\n"); 5578 } 5579 5580 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 5581 5582 intel_update_cdclk(dev_priv); 5583 intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); 5584 } 5585 5586 /* 5587 * Package states C8 and deeper are really deep PC states that can only be 5588 * reached when all the devices on the system allow it, so even if the graphics 5589 * device allows PC8+, it doesn't mean the system will actually get to these 5590 * states. Our driver only allows PC8+ when going into runtime PM. 5591 * 5592 * The requirements for PC8+ are that all the outputs are disabled, the power 5593 * well is disabled and most interrupts are disabled, and these are also 5594 * requirements for runtime PM. When these conditions are met, we manually do 5595 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 5596 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard 5597 * hang the machine. 5598 * 5599 * When we really reach PC8 or deeper states (not just when we allow it) we lose 5600 * the state of some registers, so when we come back from PC8+ we need to 5601 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 5602 * need to take care of the registers kept by RC6. Notice that this happens even 5603 * if we don't put the device in PCI D3 state (which is what currently happens 5604 * because of the runtime PM support). 5605 * 5606 * For more, read "Display Sequences for Package C8" on the hardware 5607 * documentation. 5608 */ 5609 static void hsw_enable_pc8(struct drm_i915_private *dev_priv) 5610 { 5611 u32 val; 5612 5613 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n"); 5614 5615 if (HAS_PCH_LPT_LP(dev_priv)) { 5616 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 5617 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 5618 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 5619 } 5620 5621 lpt_disable_clkout_dp(dev_priv); 5622 hsw_disable_lcpll(dev_priv, true, true); 5623 } 5624 5625 static void hsw_disable_pc8(struct drm_i915_private *dev_priv) 5626 { 5627 u32 val; 5628 5629 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); 5630 5631 hsw_restore_lcpll(dev_priv); 5632 intel_init_pch_refclk(dev_priv); 5633 5634 if (HAS_PCH_LPT_LP(dev_priv)) { 5635 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 5636 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 5637 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 5638 } 5639 } 5640 5641 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, 5642 bool enable) 5643 { 5644 i915_reg_t reg; 5645 u32 reset_bits, val; 5646 5647 if (IS_IVYBRIDGE(dev_priv)) { 5648 reg = GEN7_MSG_CTL; 5649 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; 5650 } else { 5651 reg = HSW_NDE_RSTWRN_OPT; 5652 reset_bits = RESET_PCH_HANDSHAKE_ENABLE; 5653 } 5654 5655 val = intel_de_read(dev_priv, reg); 5656 5657 if (enable) 5658 val |= reset_bits; 5659 else 5660 val &= ~reset_bits; 5661 5662 intel_de_write(dev_priv, reg, val); 5663 } 5664 5665 static void skl_display_core_init(struct drm_i915_private *dev_priv, 5666 bool resume) 5667 { 5668 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5669 struct i915_power_well *well; 5670 5671 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5672 5673 /* enable PCH reset handshake */ 5674 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 5675 5676 if (!HAS_DISPLAY(dev_priv)) 5677 return; 5678 5679 /* enable PG1 and Misc I/O */ 5680 mutex_lock(&power_domains->lock); 5681 5682 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5683 intel_power_well_enable(dev_priv, well); 5684 5685 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); 5686 intel_power_well_enable(dev_priv, well); 5687 5688 mutex_unlock(&power_domains->lock); 5689 5690 intel_cdclk_init_hw(dev_priv); 5691 5692 gen9_dbuf_enable(dev_priv); 5693 5694 if (resume && intel_dmc_has_payload(dev_priv)) 5695 intel_dmc_load_program(dev_priv); 5696 } 5697 5698 static void skl_display_core_uninit(struct drm_i915_private *dev_priv) 5699 { 5700 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5701 struct i915_power_well *well; 5702 5703 if (!HAS_DISPLAY(dev_priv)) 5704 return; 5705 5706 gen9_disable_dc_states(dev_priv); 5707 5708 gen9_dbuf_disable(dev_priv); 5709 5710 intel_cdclk_uninit_hw(dev_priv); 5711 5712 /* The spec doesn't call for removing the reset handshake flag */ 5713 /* disable PG1 and Misc I/O */ 5714 5715 mutex_lock(&power_domains->lock); 5716 5717 /* 5718 * BSpec says to keep the MISC IO power well enabled here, only 5719 * remove our request for power well 1. 5720 * Note that even though the driver's request is removed power well 1 5721 * may stay enabled after this due to DMC's own request on it. 5722 */ 5723 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5724 intel_power_well_disable(dev_priv, well); 5725 5726 mutex_unlock(&power_domains->lock); 5727 5728 usleep_range(10, 30); /* 10 us delay per Bspec */ 5729 } 5730 5731 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume) 5732 { 5733 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5734 struct i915_power_well *well; 5735 5736 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5737 5738 /* 5739 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 5740 * or else the reset will hang because there is no PCH to respond. 5741 * Move the handshake programming to initialization sequence. 5742 * Previously was left up to BIOS. 5743 */ 5744 intel_pch_reset_handshake(dev_priv, false); 5745 5746 if (!HAS_DISPLAY(dev_priv)) 5747 return; 5748 5749 /* Enable PG1 */ 5750 mutex_lock(&power_domains->lock); 5751 5752 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5753 intel_power_well_enable(dev_priv, well); 5754 5755 mutex_unlock(&power_domains->lock); 5756 5757 intel_cdclk_init_hw(dev_priv); 5758 5759 gen9_dbuf_enable(dev_priv); 5760 5761 if (resume && intel_dmc_has_payload(dev_priv)) 5762 intel_dmc_load_program(dev_priv); 5763 } 5764 5765 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) 5766 { 5767 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5768 struct i915_power_well *well; 5769 5770 if (!HAS_DISPLAY(dev_priv)) 5771 return; 5772 5773 gen9_disable_dc_states(dev_priv); 5774 5775 gen9_dbuf_disable(dev_priv); 5776 5777 intel_cdclk_uninit_hw(dev_priv); 5778 5779 /* The spec doesn't call for removing the reset handshake flag */ 5780 5781 /* 5782 * Disable PW1 (PG1). 5783 * Note that even though the driver's request is removed power well 1 5784 * may stay enabled after this due to DMC's own request on it. 5785 */ 5786 mutex_lock(&power_domains->lock); 5787 5788 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5789 intel_power_well_disable(dev_priv, well); 5790 5791 mutex_unlock(&power_domains->lock); 5792 5793 usleep_range(10, 30); /* 10 us delay per Bspec */ 5794 } 5795 5796 struct buddy_page_mask { 5797 u32 page_mask; 5798 u8 type; 5799 u8 num_channels; 5800 }; 5801 5802 static const struct buddy_page_mask tgl_buddy_page_masks[] = { 5803 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF }, 5804 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF }, 5805 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C }, 5806 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C }, 5807 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F }, 5808 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E }, 5809 { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 }, 5810 { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 }, 5811 {} 5812 }; 5813 5814 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = { 5815 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 }, 5816 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 }, 5817 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 }, 5818 { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 }, 5819 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 }, 5820 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 }, 5821 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 }, 5822 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 }, 5823 {} 5824 }; 5825 5826 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) 5827 { 5828 enum intel_dram_type type = dev_priv->dram_info.type; 5829 u8 num_channels = dev_priv->dram_info.num_channels; 5830 const struct buddy_page_mask *table; 5831 unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask; 5832 int config, i; 5833 5834 /* BW_BUDDY registers are not used on dgpu's beyond DG1 */ 5835 if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv)) 5836 return; 5837 5838 if (IS_ALDERLAKE_S(dev_priv) || 5839 IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || 5840 IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || 5841 IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) 5842 /* Wa_1409767108:tgl,dg1,adl-s */ 5843 table = wa_1409767108_buddy_page_masks; 5844 else 5845 table = tgl_buddy_page_masks; 5846 5847 for (config = 0; table[config].page_mask != 0; config++) 5848 if (table[config].num_channels == num_channels && 5849 table[config].type == type) 5850 break; 5851 5852 if (table[config].page_mask == 0) { 5853 drm_dbg(&dev_priv->drm, 5854 "Unknown memory configuration; disabling address buddy logic.\n"); 5855 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) 5856 intel_de_write(dev_priv, BW_BUDDY_CTL(i), 5857 BW_BUDDY_DISABLE); 5858 } else { 5859 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) { 5860 intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i), 5861 table[config].page_mask); 5862 5863 /* Wa_22010178259:tgl,dg1,rkl,adl-s */ 5864 if (DISPLAY_VER(dev_priv) == 12) 5865 intel_de_rmw(dev_priv, BW_BUDDY_CTL(i), 5866 BW_BUDDY_TLB_REQ_TIMER_MASK, 5867 BW_BUDDY_TLB_REQ_TIMER(0x8)); 5868 } 5869 } 5870 } 5871 5872 static void icl_display_core_init(struct drm_i915_private *dev_priv, 5873 bool resume) 5874 { 5875 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5876 struct i915_power_well *well; 5877 u32 val; 5878 5879 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5880 5881 /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */ 5882 if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP && 5883 INTEL_PCH_TYPE(dev_priv) < PCH_DG1) 5884 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0, 5885 PCH_DPMGUNIT_CLOCK_GATE_DISABLE); 5886 5887 /* 1. Enable PCH reset handshake. */ 5888 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 5889 5890 if (!HAS_DISPLAY(dev_priv)) 5891 return; 5892 5893 /* 2. Initialize all combo phys */ 5894 intel_combo_phy_init(dev_priv); 5895 5896 /* 5897 * 3. Enable Power Well 1 (PG1). 5898 * The AUX IO power wells will be enabled on demand. 5899 */ 5900 mutex_lock(&power_domains->lock); 5901 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5902 intel_power_well_enable(dev_priv, well); 5903 mutex_unlock(&power_domains->lock); 5904 5905 /* 4. Enable CDCLK. */ 5906 intel_cdclk_init_hw(dev_priv); 5907 5908 if (DISPLAY_VER(dev_priv) >= 12) 5909 gen12_dbuf_slices_config(dev_priv); 5910 5911 /* 5. Enable DBUF. */ 5912 gen9_dbuf_enable(dev_priv); 5913 5914 /* 6. Setup MBUS. */ 5915 icl_mbus_init(dev_priv); 5916 5917 /* 7. Program arbiter BW_BUDDY registers */ 5918 if (DISPLAY_VER(dev_priv) >= 12) 5919 tgl_bw_buddy_init(dev_priv); 5920 5921 /* 8. Ensure PHYs have completed calibration and adaptation */ 5922 if (IS_DG2(dev_priv)) 5923 intel_snps_phy_wait_for_calibration(dev_priv); 5924 5925 if (resume && intel_dmc_has_payload(dev_priv)) 5926 intel_dmc_load_program(dev_priv); 5927 5928 /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */ 5929 if (DISPLAY_VER(dev_priv) >= 12) { 5930 val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | 5931 DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR; 5932 intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val); 5933 } 5934 5935 /* Wa_14011503030:xelpd */ 5936 if (DISPLAY_VER(dev_priv) >= 13) 5937 intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0); 5938 } 5939 5940 static void icl_display_core_uninit(struct drm_i915_private *dev_priv) 5941 { 5942 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5943 struct i915_power_well *well; 5944 5945 if (!HAS_DISPLAY(dev_priv)) 5946 return; 5947 5948 gen9_disable_dc_states(dev_priv); 5949 5950 /* 1. Disable all display engine functions -> aready done */ 5951 5952 /* 2. Disable DBUF */ 5953 gen9_dbuf_disable(dev_priv); 5954 5955 /* 3. Disable CD clock */ 5956 intel_cdclk_uninit_hw(dev_priv); 5957 5958 /* 5959 * 4. Disable Power Well 1 (PG1). 5960 * The AUX IO power wells are toggled on demand, so they are already 5961 * disabled at this point. 5962 */ 5963 mutex_lock(&power_domains->lock); 5964 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5965 intel_power_well_disable(dev_priv, well); 5966 mutex_unlock(&power_domains->lock); 5967 5968 /* 5. */ 5969 intel_combo_phy_uninit(dev_priv); 5970 } 5971 5972 static void chv_phy_control_init(struct drm_i915_private *dev_priv) 5973 { 5974 struct i915_power_well *cmn_bc = 5975 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 5976 struct i915_power_well *cmn_d = 5977 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 5978 5979 /* 5980 * DISPLAY_PHY_CONTROL can get corrupted if read. As a 5981 * workaround never ever read DISPLAY_PHY_CONTROL, and 5982 * instead maintain a shadow copy ourselves. Use the actual 5983 * power well state and lane status to reconstruct the 5984 * expected initial value. 5985 */ 5986 dev_priv->chv_phy_control = 5987 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | 5988 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | 5989 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | 5990 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | 5991 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); 5992 5993 /* 5994 * If all lanes are disabled we leave the override disabled 5995 * with all power down bits cleared to match the state we 5996 * would use after disabling the port. Otherwise enable the 5997 * override and set the lane powerdown bits accding to the 5998 * current lane status. 5999 */ 6000 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { 6001 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A)); 6002 unsigned int mask; 6003 6004 mask = status & DPLL_PORTB_READY_MASK; 6005 if (mask == 0xf) 6006 mask = 0x0; 6007 else 6008 dev_priv->chv_phy_control |= 6009 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); 6010 6011 dev_priv->chv_phy_control |= 6012 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); 6013 6014 mask = (status & DPLL_PORTC_READY_MASK) >> 4; 6015 if (mask == 0xf) 6016 mask = 0x0; 6017 else 6018 dev_priv->chv_phy_control |= 6019 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); 6020 6021 dev_priv->chv_phy_control |= 6022 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); 6023 6024 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); 6025 6026 dev_priv->chv_phy_assert[DPIO_PHY0] = false; 6027 } else { 6028 dev_priv->chv_phy_assert[DPIO_PHY0] = true; 6029 } 6030 6031 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { 6032 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS); 6033 unsigned int mask; 6034 6035 mask = status & DPLL_PORTD_READY_MASK; 6036 6037 if (mask == 0xf) 6038 mask = 0x0; 6039 else 6040 dev_priv->chv_phy_control |= 6041 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); 6042 6043 dev_priv->chv_phy_control |= 6044 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); 6045 6046 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); 6047 6048 dev_priv->chv_phy_assert[DPIO_PHY1] = false; 6049 } else { 6050 dev_priv->chv_phy_assert[DPIO_PHY1] = true; 6051 } 6052 6053 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n", 6054 dev_priv->chv_phy_control); 6055 6056 /* Defer application of initial phy_control to enabling the powerwell */ 6057 } 6058 6059 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) 6060 { 6061 struct i915_power_well *cmn = 6062 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 6063 struct i915_power_well *disp2d = 6064 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); 6065 6066 /* If the display might be already active skip this */ 6067 if (cmn->desc->ops->is_enabled(dev_priv, cmn) && 6068 disp2d->desc->ops->is_enabled(dev_priv, disp2d) && 6069 intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST) 6070 return; 6071 6072 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n"); 6073 6074 /* cmnlane needs DPLL registers */ 6075 disp2d->desc->ops->enable(dev_priv, disp2d); 6076 6077 /* 6078 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: 6079 * Need to assert and de-assert PHY SB reset by gating the 6080 * common lane power, then un-gating it. 6081 * Simply ungating isn't enough to reset the PHY enough to get 6082 * ports and lanes running. 6083 */ 6084 cmn->desc->ops->disable(dev_priv, cmn); 6085 } 6086 6087 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) 6088 { 6089 bool ret; 6090 6091 vlv_punit_get(dev_priv); 6092 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; 6093 vlv_punit_put(dev_priv); 6094 6095 return ret; 6096 } 6097 6098 static void assert_ved_power_gated(struct drm_i915_private *dev_priv) 6099 { 6100 drm_WARN(&dev_priv->drm, 6101 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), 6102 "VED not power gated\n"); 6103 } 6104 6105 static void assert_isp_power_gated(struct drm_i915_private *dev_priv) 6106 { 6107 static const struct pci_device_id isp_ids[] = { 6108 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, 6109 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, 6110 {} 6111 }; 6112 6113 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) && 6114 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), 6115 "ISP not power gated\n"); 6116 } 6117 6118 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); 6119 6120 /** 6121 * intel_power_domains_init_hw - initialize hardware power domain state 6122 * @i915: i915 device instance 6123 * @resume: Called from resume code paths or not 6124 * 6125 * This function initializes the hardware power domain state and enables all 6126 * power wells belonging to the INIT power domain. Power wells in other 6127 * domains (and not in the INIT domain) are referenced or disabled by 6128 * intel_modeset_readout_hw_state(). After that the reference count of each 6129 * power well must match its HW enabled state, see 6130 * intel_power_domains_verify_state(). 6131 * 6132 * It will return with power domains disabled (to be enabled later by 6133 * intel_power_domains_enable()) and must be paired with 6134 * intel_power_domains_driver_remove(). 6135 */ 6136 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) 6137 { 6138 struct i915_power_domains *power_domains = &i915->power_domains; 6139 6140 power_domains->initializing = true; 6141 6142 if (DISPLAY_VER(i915) >= 11) { 6143 icl_display_core_init(i915, resume); 6144 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 6145 bxt_display_core_init(i915, resume); 6146 } else if (DISPLAY_VER(i915) == 9) { 6147 skl_display_core_init(i915, resume); 6148 } else if (IS_CHERRYVIEW(i915)) { 6149 mutex_lock(&power_domains->lock); 6150 chv_phy_control_init(i915); 6151 mutex_unlock(&power_domains->lock); 6152 assert_isp_power_gated(i915); 6153 } else if (IS_VALLEYVIEW(i915)) { 6154 mutex_lock(&power_domains->lock); 6155 vlv_cmnlane_wa(i915); 6156 mutex_unlock(&power_domains->lock); 6157 assert_ved_power_gated(i915); 6158 assert_isp_power_gated(i915); 6159 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) { 6160 hsw_assert_cdclk(i915); 6161 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 6162 } else if (IS_IVYBRIDGE(i915)) { 6163 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 6164 } 6165 6166 /* 6167 * Keep all power wells enabled for any dependent HW access during 6168 * initialization and to make sure we keep BIOS enabled display HW 6169 * resources powered until display HW readout is complete. We drop 6170 * this reference in intel_power_domains_enable(). 6171 */ 6172 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 6173 power_domains->init_wakeref = 6174 intel_display_power_get(i915, POWER_DOMAIN_INIT); 6175 6176 /* Disable power support if the user asked so. */ 6177 if (!i915->params.disable_power_well) { 6178 drm_WARN_ON(&i915->drm, power_domains->disable_wakeref); 6179 i915->power_domains.disable_wakeref = intel_display_power_get(i915, 6180 POWER_DOMAIN_INIT); 6181 } 6182 intel_power_domains_sync_hw(i915); 6183 6184 power_domains->initializing = false; 6185 } 6186 6187 /** 6188 * intel_power_domains_driver_remove - deinitialize hw power domain state 6189 * @i915: i915 device instance 6190 * 6191 * De-initializes the display power domain HW state. It also ensures that the 6192 * device stays powered up so that the driver can be reloaded. 6193 * 6194 * It must be called with power domains already disabled (after a call to 6195 * intel_power_domains_disable()) and must be paired with 6196 * intel_power_domains_init_hw(). 6197 */ 6198 void intel_power_domains_driver_remove(struct drm_i915_private *i915) 6199 { 6200 intel_wakeref_t wakeref __maybe_unused = 6201 fetch_and_zero(&i915->power_domains.init_wakeref); 6202 6203 /* Remove the refcount we took to keep power well support disabled. */ 6204 if (!i915->params.disable_power_well) 6205 intel_display_power_put(i915, POWER_DOMAIN_INIT, 6206 fetch_and_zero(&i915->power_domains.disable_wakeref)); 6207 6208 intel_display_power_flush_work_sync(i915); 6209 6210 intel_power_domains_verify_state(i915); 6211 6212 /* Keep the power well enabled, but cancel its rpm wakeref. */ 6213 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 6214 } 6215 6216 /** 6217 * intel_power_domains_sanitize_state - sanitize power domains state 6218 * @i915: i915 device instance 6219 * 6220 * Sanitize the power domains state during driver loading and system resume. 6221 * The function will disable all display power wells that BIOS has enabled 6222 * without a user for it (any user for a power well has taken a reference 6223 * on it by the time this function is called, after the state of all the 6224 * pipe, encoder, etc. HW resources have been sanitized). 6225 */ 6226 void intel_power_domains_sanitize_state(struct drm_i915_private *i915) 6227 { 6228 struct i915_power_domains *power_domains = &i915->power_domains; 6229 struct i915_power_well *power_well; 6230 6231 mutex_lock(&power_domains->lock); 6232 6233 for_each_power_well_reverse(i915, power_well) { 6234 if (power_well->desc->always_on || power_well->count || 6235 !power_well->desc->ops->is_enabled(i915, power_well)) 6236 continue; 6237 6238 drm_dbg_kms(&i915->drm, 6239 "BIOS left unused %s power well enabled, disabling it\n", 6240 power_well->desc->name); 6241 intel_power_well_disable(i915, power_well); 6242 } 6243 6244 mutex_unlock(&power_domains->lock); 6245 } 6246 6247 /** 6248 * intel_power_domains_enable - enable toggling of display power wells 6249 * @i915: i915 device instance 6250 * 6251 * Enable the ondemand enabling/disabling of the display power wells. Note that 6252 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled 6253 * only at specific points of the display modeset sequence, thus they are not 6254 * affected by the intel_power_domains_enable()/disable() calls. The purpose 6255 * of these function is to keep the rest of power wells enabled until the end 6256 * of display HW readout (which will acquire the power references reflecting 6257 * the current HW state). 6258 */ 6259 void intel_power_domains_enable(struct drm_i915_private *i915) 6260 { 6261 intel_wakeref_t wakeref __maybe_unused = 6262 fetch_and_zero(&i915->power_domains.init_wakeref); 6263 6264 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 6265 intel_power_domains_verify_state(i915); 6266 } 6267 6268 /** 6269 * intel_power_domains_disable - disable toggling of display power wells 6270 * @i915: i915 device instance 6271 * 6272 * Disable the ondemand enabling/disabling of the display power wells. See 6273 * intel_power_domains_enable() for which power wells this call controls. 6274 */ 6275 void intel_power_domains_disable(struct drm_i915_private *i915) 6276 { 6277 struct i915_power_domains *power_domains = &i915->power_domains; 6278 6279 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 6280 power_domains->init_wakeref = 6281 intel_display_power_get(i915, POWER_DOMAIN_INIT); 6282 6283 intel_power_domains_verify_state(i915); 6284 } 6285 6286 /** 6287 * intel_power_domains_suspend - suspend power domain state 6288 * @i915: i915 device instance 6289 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) 6290 * 6291 * This function prepares the hardware power domain state before entering 6292 * system suspend. 6293 * 6294 * It must be called with power domains already disabled (after a call to 6295 * intel_power_domains_disable()) and paired with intel_power_domains_resume(). 6296 */ 6297 void intel_power_domains_suspend(struct drm_i915_private *i915, 6298 enum i915_drm_suspend_mode suspend_mode) 6299 { 6300 struct i915_power_domains *power_domains = &i915->power_domains; 6301 intel_wakeref_t wakeref __maybe_unused = 6302 fetch_and_zero(&power_domains->init_wakeref); 6303 6304 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 6305 6306 /* 6307 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 6308 * support don't manually deinit the power domains. This also means the 6309 * DMC firmware will stay active, it will power down any HW 6310 * resources as required and also enable deeper system power states 6311 * that would be blocked if the firmware was inactive. 6312 */ 6313 if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) && 6314 suspend_mode == I915_DRM_SUSPEND_IDLE && 6315 intel_dmc_has_payload(i915)) { 6316 intel_display_power_flush_work(i915); 6317 intel_power_domains_verify_state(i915); 6318 return; 6319 } 6320 6321 /* 6322 * Even if power well support was disabled we still want to disable 6323 * power wells if power domains must be deinitialized for suspend. 6324 */ 6325 if (!i915->params.disable_power_well) 6326 intel_display_power_put(i915, POWER_DOMAIN_INIT, 6327 fetch_and_zero(&i915->power_domains.disable_wakeref)); 6328 6329 intel_display_power_flush_work(i915); 6330 intel_power_domains_verify_state(i915); 6331 6332 if (DISPLAY_VER(i915) >= 11) 6333 icl_display_core_uninit(i915); 6334 else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) 6335 bxt_display_core_uninit(i915); 6336 else if (DISPLAY_VER(i915) == 9) 6337 skl_display_core_uninit(i915); 6338 6339 power_domains->display_core_suspended = true; 6340 } 6341 6342 /** 6343 * intel_power_domains_resume - resume power domain state 6344 * @i915: i915 device instance 6345 * 6346 * This function resume the hardware power domain state during system resume. 6347 * 6348 * It will return with power domain support disabled (to be enabled later by 6349 * intel_power_domains_enable()) and must be paired with 6350 * intel_power_domains_suspend(). 6351 */ 6352 void intel_power_domains_resume(struct drm_i915_private *i915) 6353 { 6354 struct i915_power_domains *power_domains = &i915->power_domains; 6355 6356 if (power_domains->display_core_suspended) { 6357 intel_power_domains_init_hw(i915, true); 6358 power_domains->display_core_suspended = false; 6359 } else { 6360 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 6361 power_domains->init_wakeref = 6362 intel_display_power_get(i915, POWER_DOMAIN_INIT); 6363 } 6364 6365 intel_power_domains_verify_state(i915); 6366 } 6367 6368 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 6369 6370 static void intel_power_domains_dump_info(struct drm_i915_private *i915) 6371 { 6372 struct i915_power_domains *power_domains = &i915->power_domains; 6373 struct i915_power_well *power_well; 6374 6375 for_each_power_well(i915, power_well) { 6376 enum intel_display_power_domain domain; 6377 6378 drm_dbg(&i915->drm, "%-25s %d\n", 6379 power_well->desc->name, power_well->count); 6380 6381 for_each_power_domain(domain, power_well->desc->domains) 6382 drm_dbg(&i915->drm, " %-23s %d\n", 6383 intel_display_power_domain_str(domain), 6384 power_domains->domain_use_count[domain]); 6385 } 6386 } 6387 6388 /** 6389 * intel_power_domains_verify_state - verify the HW/SW state for all power wells 6390 * @i915: i915 device instance 6391 * 6392 * Verify if the reference count of each power well matches its HW enabled 6393 * state and the total refcount of the domains it belongs to. This must be 6394 * called after modeset HW state sanitization, which is responsible for 6395 * acquiring reference counts for any power wells in use and disabling the 6396 * ones left on by BIOS but not required by any active output. 6397 */ 6398 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 6399 { 6400 struct i915_power_domains *power_domains = &i915->power_domains; 6401 struct i915_power_well *power_well; 6402 bool dump_domain_info; 6403 6404 mutex_lock(&power_domains->lock); 6405 6406 verify_async_put_domains_state(power_domains); 6407 6408 dump_domain_info = false; 6409 for_each_power_well(i915, power_well) { 6410 enum intel_display_power_domain domain; 6411 int domains_count; 6412 bool enabled; 6413 6414 enabled = power_well->desc->ops->is_enabled(i915, power_well); 6415 if ((power_well->count || power_well->desc->always_on) != 6416 enabled) 6417 drm_err(&i915->drm, 6418 "power well %s state mismatch (refcount %d/enabled %d)", 6419 power_well->desc->name, 6420 power_well->count, enabled); 6421 6422 domains_count = 0; 6423 for_each_power_domain(domain, power_well->desc->domains) 6424 domains_count += power_domains->domain_use_count[domain]; 6425 6426 if (power_well->count != domains_count) { 6427 drm_err(&i915->drm, 6428 "power well %s refcount/domain refcount mismatch " 6429 "(refcount %d/domains refcount %d)\n", 6430 power_well->desc->name, power_well->count, 6431 domains_count); 6432 dump_domain_info = true; 6433 } 6434 } 6435 6436 if (dump_domain_info) { 6437 static bool dumped; 6438 6439 if (!dumped) { 6440 intel_power_domains_dump_info(i915); 6441 dumped = true; 6442 } 6443 } 6444 6445 mutex_unlock(&power_domains->lock); 6446 } 6447 6448 #else 6449 6450 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 6451 { 6452 } 6453 6454 #endif 6455 6456 void intel_display_power_suspend_late(struct drm_i915_private *i915) 6457 { 6458 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || 6459 IS_BROXTON(i915)) { 6460 bxt_enable_dc9(i915); 6461 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 6462 hsw_enable_pc8(i915); 6463 } 6464 6465 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ 6466 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) 6467 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); 6468 } 6469 6470 void intel_display_power_resume_early(struct drm_i915_private *i915) 6471 { 6472 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || 6473 IS_BROXTON(i915)) { 6474 gen9_sanitize_dc_state(i915); 6475 bxt_disable_dc9(i915); 6476 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 6477 hsw_disable_pc8(i915); 6478 } 6479 6480 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ 6481 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) 6482 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); 6483 } 6484 6485 void intel_display_power_suspend(struct drm_i915_private *i915) 6486 { 6487 if (DISPLAY_VER(i915) >= 11) { 6488 icl_display_core_uninit(i915); 6489 bxt_enable_dc9(i915); 6490 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 6491 bxt_display_core_uninit(i915); 6492 bxt_enable_dc9(i915); 6493 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 6494 hsw_enable_pc8(i915); 6495 } 6496 } 6497 6498 void intel_display_power_resume(struct drm_i915_private *i915) 6499 { 6500 if (DISPLAY_VER(i915) >= 11) { 6501 bxt_disable_dc9(i915); 6502 icl_display_core_init(i915, true); 6503 if (intel_dmc_has_payload(i915)) { 6504 if (i915->dmc.allowed_dc_mask & 6505 DC_STATE_EN_UPTO_DC6) 6506 skl_enable_dc6(i915); 6507 else if (i915->dmc.allowed_dc_mask & 6508 DC_STATE_EN_UPTO_DC5) 6509 gen9_enable_dc5(i915); 6510 } 6511 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 6512 bxt_disable_dc9(i915); 6513 bxt_display_core_init(i915, true); 6514 if (intel_dmc_has_payload(i915) && 6515 (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 6516 gen9_enable_dc5(i915); 6517 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 6518 hsw_disable_pc8(i915); 6519 } 6520 } 6521 6522 void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m) 6523 { 6524 struct i915_power_domains *power_domains = &i915->power_domains; 6525 int i; 6526 6527 mutex_lock(&power_domains->lock); 6528 6529 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 6530 for (i = 0; i < power_domains->power_well_count; i++) { 6531 struct i915_power_well *power_well; 6532 enum intel_display_power_domain power_domain; 6533 6534 power_well = &power_domains->power_wells[i]; 6535 seq_printf(m, "%-25s %d\n", power_well->desc->name, 6536 power_well->count); 6537 6538 for_each_power_domain(power_domain, power_well->desc->domains) 6539 seq_printf(m, " %-23s %d\n", 6540 intel_display_power_domain_str(power_domain), 6541 power_domains->domain_use_count[power_domain]); 6542 } 6543 6544 mutex_unlock(&power_domains->lock); 6545 } 6546