/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "vlv_sideband.h"

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};
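/*
 * Illustrative sketch only: a minimal ops instance for an always-on well
 * would wire the callbacks up to the i9xx_always_on_power_well_* helpers
 * defined further down in this file:
 *
 *	static const struct i915_power_well_ops example_always_on_ops = {
 *		.sync_hw = i9xx_power_well_sync_hw_noop,
 *		.enable = i9xx_always_on_power_well_noop,
 *		.disable = i9xx_always_on_power_well_noop,
 *		.is_enabled = i9xx_always_on_power_well_enabled,
 *	};
 */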
struct i915_power_well_regs {
	i915_reg_t bios;
	i915_reg_t driver;
	i915_reg_t kvmr;
	i915_reg_t debug;
};

/* Power well structure for haswell */
struct i915_power_well_desc {
	const char *name;
	bool always_on;
	u64 domains;
	/* unique identifier for this power well */
	enum i915_power_well_id id;
	/*
	 * Arbitrary data associated with this power well. Platform and power
	 * well specific.
	 */
	union {
		struct {
			/*
			 * request/status flag index in the PUNIT power well
			 * control/status registers.
			 */
			u8 idx;
		} vlv;
		struct {
			enum dpio_phy phy;
		} bxt;
		struct {
			const struct i915_power_well_regs *regs;
			/*
			 * request/status flag index in the power well
			 * control/status registers.
			 */
			u8 idx;
			/* Mask of pipes whose IRQ logic is backed by the pw */
			u8 irq_pipe_mask;
			/*
			 * Instead of waiting for the status bit to ack enables,
			 * just wait a specific amount of time and then consider
			 * the well enabled.
			 */
			u16 fixed_enable_delay;
			/* The pw is backing the VGA functionality */
			bool has_vga:1;
			bool has_fuses:1;
			/*
			 * The pw is for an ICL+ TypeC PHY port in
			 * Thunderbolt mode.
			 */
			bool is_tc_tbt:1;
		} hsw;
	};
	const struct i915_power_well_ops *ops;
};

struct i915_power_well {
	const struct i915_power_well_desc *desc;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_G_LANES:
		return "PORT_DDI_G_LANES";
	case POWER_DOMAIN_PORT_DDI_H_LANES:
		return "PORT_DDI_H_LANES";
	case POWER_DOMAIN_PORT_DDI_I_LANES:
		return "PORT_DDI_I_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_G_IO:
		return "PORT_DDI_G_IO";
	case POWER_DOMAIN_PORT_DDI_H_IO:
		return "PORT_DDI_H_IO";
	case POWER_DOMAIN_PORT_DDI_I_IO:
		return "PORT_DDI_I_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO_MMIO:
		return "AUDIO_MMIO";
	case POWER_DOMAIN_AUDIO_PLAYBACK:
		return "AUDIO_PLAYBACK";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_G:
		return "AUX_G";
	case POWER_DOMAIN_AUX_H:
		return "AUX_H";
	case POWER_DOMAIN_AUX_I:
		return "AUX_I";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_C_TBT:
		return "AUX_C_TBT";
	case POWER_DOMAIN_AUX_D_TBT:
		return "AUX_D_TBT";
	case POWER_DOMAIN_AUX_E_TBT:
		return "AUX_E_TBT";
	case POWER_DOMAIN_AUX_F_TBT:
		return "AUX_F_TBT";
	case POWER_DOMAIN_AUX_G_TBT:
		return "AUX_G_TBT";
	case POWER_DOMAIN_AUX_H_TBT:
		return "AUX_H_TBT";
	case POWER_DOMAIN_AUX_I_TBT:
		return "AUX_I_TBT";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DC_OFF:
		return "DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	drm_WARN(&dev_priv->drm, !power_well->count,
		 "Use count on power well %s is already zero",
		 power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}
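/*
 * Usage sketch (illustrative only): outside of state readout paths, callers
 * are expected to hold an explicit power reference around register access
 * rather than sampling the enabled state:
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access the hardware block ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */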
/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
						 ICL_AUX_PW_TO_CH(pw_idx);
}

static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
		       enum aux_ch aux_ch)
{
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);
		if (!dig_port)
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	return dig_port;
}

static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
				  const struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);

	return intel_port_to_phy(i915, dig_port->base.port);
}
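/*
 * Worked example for the mapping above (illustrative): a combo AUX well with
 * pw_idx == ICL_PW_CTL_IDX_AUX_B resolves to AUX_CH_B via ICL_AUX_PW_TO_CH(),
 * while a Thunderbolt well with pw_idx == ICL_PW_CTL_IDX_AUX_TBT1 resolves to
 * AUX_CH_C via ICL_TBT_AUX_PW_TO_CH(), since TBT AUX channels start at C.
 */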
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	int enable_delay = power_well->desc->hsw.fixed_enable_delay;

	/*
	 * For some power wells we're not supposed to watch the status bit for
	 * an ack, but rather just wait a fixed amount of time and then
	 * proceed. This is only used on DG2.
	 */
	if (IS_DG2(dev_priv) && enable_delay) {
		usleep_range(enable_delay, 2 * enable_delay);
		return;
	}

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
			    power_well->desc->name);

		drm_WARN_ON(&dev_priv->drm, !timeout_expected);

	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;

	return ret;
}

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    power_well->desc->name,
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
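/*
 * For reference: hsw_power_well_requesters() above encodes the requesters as
 * bit0 = BIOS, bit1 = driver, bit2 = KVMr, bit3 = debug, so e.g. a return
 * value of 0x6 means both the driver and KVMr still request the well.
 */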
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);

		/* Wa_16013190616:adlp */
		if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
			intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);

		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(dev_priv, pg);
	}

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (DISPLAY_VER(dev_priv) < 12) {
		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
			       val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
		       val & ~ICL_LANE_ENABLE_AUX);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);

	return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
		return;

	if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		return;

	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void icl_tc_cold_exit(struct drm_i915_private *i915)
{
	int ret, tries = 0;

	while (1) {
		ret = sandybridge_pcode_write_timeout(i915,
						      ICL_PCODE_EXIT_TCCOLD,
						      0, 250, 1);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		msleep(1);
	}

	/* Spec states that TC cold exit can take up to 1ms to complete */
	if (!ret)
		msleep(1);

	/* TODO: turn failure into an error as soon as i915 CI updates the ICL IFWI */
	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
		    "succeeded");
}
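/*
 * For reference (illustrative): TGL_AUX_PW_TO_TC_PORT() above is a plain
 * offset, so a well with pw_idx == TGL_PW_CTL_IDX_AUX_TC1 maps to the first
 * TC port and each following AUX TC well maps to the next TC port in order.
 */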
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
	bool timeout_expected;
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (is_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));

	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down, or when we
	 * need to enable AUX on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
	if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		icl_tc_cold_exit(dev_priv);

	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);

	if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
			       HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting for TC uC health\n");
	}
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	hsw_power_well_disable(dev_priv, power_well);
}

static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
			  struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_enable(dev_priv,
							   power_well);
	else
		return hsw_power_well_enable(dev_priv, power_well);
}

static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_disable(dev_priv,
							    power_well);
	else
		return hsw_power_well_disable(dev_priv, power_well);
}
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(dev_priv, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(dev_priv, regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(dev_priv, DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning the old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do {
		v = intel_de_read(dev_priv, DC_STATE_EN);

		if (v != state) {
			intel_de_write(dev_priv, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(&dev_priv->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the time we need one retry, avoid spam */
	if (rewrites > 1)
		drm_dbg_kms(&dev_priv->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (DISPLAY_VER(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
					  | DC_STATE_EN_DC9;
	else if (DISPLAY_VER(dev_priv) == 11)
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_DISPLAY(dev_priv))
		return;

	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);

	drm_dbg_kms(&dev_priv->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    dev_priv->dmc.dc_state, val);
	dev_priv->dmc.dc_state = val;
}
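/*
 * Example of the mask above (illustrative): on DISPLAY_VER >= 12 all of
 * DC3CO/DC5/DC6/DC9 are covered, while the GLK/BXT mask only includes the
 * DC5 and DC9 bits, since those platforms have no DC6 or DC3CO.
 */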
/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back when exiting
 * that state to a shallower power state (lower in number). The HW will decide
 * when to actually enter a given state on an on-demand basis, for instance
 * depending on the active state of display pipes. The state of display
 * registers backed by affected power rails are saved/restored as needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * the state actually being entered. Disabling a deeper power state is
 * synchronous: for instance setting %DC_STATE_DISABLE won't complete until
 * all HW resources are turned back on and register state is restored. This
 * is guaranteed by the MMIO write to DC_STATE_EN blocking until the state is
 * restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     state & ~dev_priv->dmc.allowed_dc_mask))
		state &= dev_priv->dmc.allowed_dc_mask;

	val = intel_de_read(dev_priv, DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->dmc.dc_state)
		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			dev_priv->dmc.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->dmc.dc_state = val & mask;
}

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	static const u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}
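/*
 * Worked example (illustrative): if DC_STATE_EN_UPTO_DC6 is requested but
 * only DC5 is in allowed_dc_mask, the loop above falls back to
 * DC_STATE_EN_UPTO_DC5; with nothing allowed it degrades step by step down
 * to DC_STATE_DISABLE.
 */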
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
	val = intel_de_read(dev_priv, DC_STATE_EN);
	val &= ~DC_STATE_DC3CO_STATUS;
	intel_de_write(dev_priv, DC_STATE_EN, val);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Delay of 200us for the DC3CO exit time, per Bspec 49196.
	 */
	usleep_range(200, 210);
}

static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_pps_reset_all(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      !intel_de_read(dev_priv,
				     DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
		      "DMC program storage start is NULL\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
		      "DMC SSP Base Not fine\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
		      "DMC HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	drm_WARN(&dev_priv->drm, 1,
		 "Power well %d not defined for this platform\n",
		 power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}
/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state; based on this
 * target_dc_state, the "DC off" power well will enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->dmc.target_dc_state)
		goto unlock;

	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
							   power_well);
	/*
	 * If the DC off power well is disabled, we need to enable and then
	 * disable it to apply the new target DC state.
	 */
	if (!dc_off_enabled)
		power_well->desc->ops->enable(dev_priv, power_well);

	dev_priv->dmc.target_dc_state = state;

	if (!dc_off_enabled)
		power_well->desc->ops->disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
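/*
 * Usage sketch (illustrative only): a feature that needs DC3CO instead of
 * DC5/DC6 while it is active could switch the target state around its
 * enable/disable points:
 *
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 *	...
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 *
 * sanitize_target_dc_state() takes care of falling back to an allowed state.
 */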
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (DISPLAY_VER(dev_priv) == 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(&dev_priv->drm,
		      intel_display_power_well_is_enabled(dev_priv, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_dmc_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		      "Backlight is not disabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_dmc_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(dev_priv, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(dev_priv, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(dev_priv, regs->driver, drv_req | mask);
		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;

	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
}
1313 */ 1314 intel_combo_phy_init(dev_priv); 1315 } 1316 1317 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, 1318 struct i915_power_well *power_well) 1319 { 1320 gen9_disable_dc_states(dev_priv); 1321 } 1322 1323 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, 1324 struct i915_power_well *power_well) 1325 { 1326 if (!intel_dmc_has_payload(dev_priv)) 1327 return; 1328 1329 switch (dev_priv->dmc.target_dc_state) { 1330 case DC_STATE_EN_DC3CO: 1331 tgl_enable_dc3co(dev_priv); 1332 break; 1333 case DC_STATE_EN_UPTO_DC6: 1334 skl_enable_dc6(dev_priv); 1335 break; 1336 case DC_STATE_EN_UPTO_DC5: 1337 gen9_enable_dc5(dev_priv); 1338 break; 1339 } 1340 } 1341 1342 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, 1343 struct i915_power_well *power_well) 1344 { 1345 } 1346 1347 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, 1348 struct i915_power_well *power_well) 1349 { 1350 } 1351 1352 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, 1353 struct i915_power_well *power_well) 1354 { 1355 return true; 1356 } 1357 1358 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, 1359 struct i915_power_well *power_well) 1360 { 1361 if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) 1362 i830_enable_pipe(dev_priv, PIPE_A); 1363 if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) 1364 i830_enable_pipe(dev_priv, PIPE_B); 1365 } 1366 1367 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, 1368 struct i915_power_well *power_well) 1369 { 1370 i830_disable_pipe(dev_priv, PIPE_B); 1371 i830_disable_pipe(dev_priv, PIPE_A); 1372 } 1373 1374 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, 1375 struct i915_power_well *power_well) 1376 { 1377 return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE && 1378 intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; 1379 } 1380 1381 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, 1382 struct i915_power_well *power_well) 1383 { 1384 if (power_well->count > 0) 1385 i830_pipes_power_well_enable(dev_priv, power_well); 1386 else 1387 i830_pipes_power_well_disable(dev_priv, power_well); 1388 } 1389 1390 static void vlv_set_power_well(struct drm_i915_private *dev_priv, 1391 struct i915_power_well *power_well, bool enable) 1392 { 1393 int pw_idx = power_well->desc->vlv.idx; 1394 u32 mask; 1395 u32 state; 1396 u32 ctrl; 1397 1398 mask = PUNIT_PWRGT_MASK(pw_idx); 1399 state = enable ? 
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	intel_de_write(dev_priv, DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}
1515 */ 1516 for_each_pipe(dev_priv, pipe) { 1517 u32 val = intel_de_read(dev_priv, DPLL(pipe)); 1518 1519 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1520 if (pipe != PIPE_A) 1521 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1522 1523 intel_de_write(dev_priv, DPLL(pipe), val); 1524 } 1525 1526 vlv_init_display_clock_gating(dev_priv); 1527 1528 spin_lock_irq(&dev_priv->irq_lock); 1529 valleyview_enable_display_irqs(dev_priv); 1530 spin_unlock_irq(&dev_priv->irq_lock); 1531 1532 /* 1533 * During driver initialization/resume we can avoid restoring the 1534 * part of the HW/SW state that will be inited anyway explicitly. 1535 */ 1536 if (dev_priv->power_domains.initializing) 1537 return; 1538 1539 intel_hpd_init(dev_priv); 1540 intel_hpd_poll_disable(dev_priv); 1541 1542 /* Re-enable the ADPA, if we have one */ 1543 for_each_intel_encoder(&dev_priv->drm, encoder) { 1544 if (encoder->type == INTEL_OUTPUT_ANALOG) 1545 intel_crt_reset(&encoder->base); 1546 } 1547 1548 intel_vga_redisable_power_on(dev_priv); 1549 1550 intel_pps_unlock_regs_wa(dev_priv); 1551 } 1552 1553 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) 1554 { 1555 spin_lock_irq(&dev_priv->irq_lock); 1556 valleyview_disable_display_irqs(dev_priv); 1557 spin_unlock_irq(&dev_priv->irq_lock); 1558 1559 /* make sure we're done processing display irqs */ 1560 intel_synchronize_irq(dev_priv); 1561 1562 intel_pps_reset_all(dev_priv); 1563 1564 /* Prevent us from re-enabling polling on accident in late suspend */ 1565 if (!dev_priv->drm.dev->power.is_suspended) 1566 intel_hpd_poll_enable(dev_priv); 1567 } 1568 1569 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, 1570 struct i915_power_well *power_well) 1571 { 1572 vlv_set_power_well(dev_priv, power_well, true); 1573 1574 vlv_display_power_well_init(dev_priv); 1575 } 1576 1577 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, 1578 struct i915_power_well *power_well) 1579 { 1580 vlv_display_power_well_deinit(dev_priv); 1581 1582 vlv_set_power_well(dev_priv, power_well, false); 1583 } 1584 1585 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1586 struct i915_power_well *power_well) 1587 { 1588 /* since ref/cri clock was enabled */ 1589 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1590 1591 vlv_set_power_well(dev_priv, power_well, true); 1592 1593 /* 1594 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - 1595 * 6. De-assert cmn_reset/side_reset. Same as VLV X0. 1596 * a. GUnit 0x2110 bit[0] set to 1 (def 0) 1597 * b. The other bits such as sfr settings / modesel may all 1598 * be set to 0. 1599 * 1600 * This should only be done on init and resume from S3 with 1601 * both PLLs disabled, or we risk losing DPIO and PLL 1602 * synchronization. 
1603 */ 1604 intel_de_write(dev_priv, DPIO_CTL, 1605 intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST); 1606 } 1607 1608 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1609 struct i915_power_well *power_well) 1610 { 1611 enum pipe pipe; 1612 1613 for_each_pipe(dev_priv, pipe) 1614 assert_pll_disabled(dev_priv, pipe); 1615 1616 /* Assert common reset */ 1617 intel_de_write(dev_priv, DPIO_CTL, 1618 intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST); 1619 1620 vlv_set_power_well(dev_priv, power_well, false); 1621 } 1622 1623 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) 1624 1625 #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) 1626 1627 static void assert_chv_phy_status(struct drm_i915_private *dev_priv) 1628 { 1629 struct i915_power_well *cmn_bc = 1630 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 1631 struct i915_power_well *cmn_d = 1632 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 1633 u32 phy_control = dev_priv->chv_phy_control; 1634 u32 phy_status = 0; 1635 u32 phy_status_mask = 0xffffffff; 1636 1637 /* 1638 * The BIOS can leave the PHY is some weird state 1639 * where it doesn't fully power down some parts. 1640 * Disable the asserts until the PHY has been fully 1641 * reset (ie. the power well has been disabled at 1642 * least once). 1643 */ 1644 if (!dev_priv->chv_phy_assert[DPIO_PHY0]) 1645 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | 1646 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | 1647 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | 1648 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | 1649 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | 1650 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); 1651 1652 if (!dev_priv->chv_phy_assert[DPIO_PHY1]) 1653 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | 1654 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | 1655 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); 1656 1657 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { 1658 phy_status |= PHY_POWERGOOD(DPIO_PHY0); 1659 1660 /* this assumes override is only used to enable lanes */ 1661 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) 1662 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); 1663 1664 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) 1665 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); 1666 1667 /* CL1 is on whenever anything is on in either channel */ 1668 if (BITS_SET(phy_control, 1669 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | 1670 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) 1671 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); 1672 1673 /* 1674 * The DPLLB check accounts for the pipe B + port A usage 1675 * with CL2 powered up but all the lanes in the second channel 1676 * powered down. 
1677 */ 1678 if (BITS_SET(phy_control, 1679 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && 1680 (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) 1681 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); 1682 1683 if (BITS_SET(phy_control, 1684 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) 1685 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); 1686 if (BITS_SET(phy_control, 1687 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) 1688 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); 1689 1690 if (BITS_SET(phy_control, 1691 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) 1692 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); 1693 if (BITS_SET(phy_control, 1694 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) 1695 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); 1696 } 1697 1698 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { 1699 phy_status |= PHY_POWERGOOD(DPIO_PHY1); 1700 1701 /* this assumes override is only used to enable lanes */ 1702 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) 1703 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); 1704 1705 if (BITS_SET(phy_control, 1706 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) 1707 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); 1708 1709 if (BITS_SET(phy_control, 1710 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) 1711 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); 1712 if (BITS_SET(phy_control, 1713 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) 1714 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); 1715 } 1716 1717 phy_status &= phy_status_mask; 1718 1719 /* 1720 * The PHY may be busy with some initial calibration and whatnot, 1721 * so the power state can take a while to actually change. 
1722 */ 1723 if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS, 1724 phy_status_mask, phy_status, 10)) 1725 drm_err(&dev_priv->drm, 1726 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", 1727 intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, 1728 phy_status, dev_priv->chv_phy_control); 1729 } 1730 1731 #undef BITS_SET 1732 1733 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1734 struct i915_power_well *power_well) 1735 { 1736 enum dpio_phy phy; 1737 enum pipe pipe; 1738 u32 tmp; 1739 1740 drm_WARN_ON_ONCE(&dev_priv->drm, 1741 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && 1742 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); 1743 1744 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1745 pipe = PIPE_A; 1746 phy = DPIO_PHY0; 1747 } else { 1748 pipe = PIPE_C; 1749 phy = DPIO_PHY1; 1750 } 1751 1752 /* since ref/cri clock was enabled */ 1753 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1754 vlv_set_power_well(dev_priv, power_well, true); 1755 1756 /* Poll for phypwrgood signal */ 1757 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, 1758 PHY_POWERGOOD(phy), 1)) 1759 drm_err(&dev_priv->drm, "Display PHY %d is not power up\n", 1760 phy); 1761 1762 vlv_dpio_get(dev_priv); 1763 1764 /* Enable dynamic power down */ 1765 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); 1766 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | 1767 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; 1768 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); 1769 1770 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1771 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); 1772 tmp |= DPIO_DYNPWRDOWNEN_CH1; 1773 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); 1774 } else { 1775 /* 1776 * Force the non-existing CL2 off. BXT does this 1777 * too, so maybe it saves some power even though 1778 * CL2 doesn't exist? 
1779 */ 1780 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); 1781 tmp |= DPIO_CL2_LDOFUSE_PWRENB; 1782 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); 1783 } 1784 1785 vlv_dpio_put(dev_priv); 1786 1787 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); 1788 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1789 dev_priv->chv_phy_control); 1790 1791 drm_dbg_kms(&dev_priv->drm, 1792 "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1793 phy, dev_priv->chv_phy_control); 1794 1795 assert_chv_phy_status(dev_priv); 1796 } 1797 1798 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1799 struct i915_power_well *power_well) 1800 { 1801 enum dpio_phy phy; 1802 1803 drm_WARN_ON_ONCE(&dev_priv->drm, 1804 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && 1805 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); 1806 1807 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1808 phy = DPIO_PHY0; 1809 assert_pll_disabled(dev_priv, PIPE_A); 1810 assert_pll_disabled(dev_priv, PIPE_B); 1811 } else { 1812 phy = DPIO_PHY1; 1813 assert_pll_disabled(dev_priv, PIPE_C); 1814 } 1815 1816 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); 1817 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1818 dev_priv->chv_phy_control); 1819 1820 vlv_set_power_well(dev_priv, power_well, false); 1821 1822 drm_dbg_kms(&dev_priv->drm, 1823 "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1824 phy, dev_priv->chv_phy_control); 1825 1826 /* PHY is fully reset now, so we can enable the PHY state asserts */ 1827 dev_priv->chv_phy_assert[phy] = true; 1828 1829 assert_chv_phy_status(dev_priv); 1830 } 1831 1832 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1833 enum dpio_channel ch, bool override, unsigned int mask) 1834 { 1835 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; 1836 u32 reg, val, expected, actual; 1837 1838 /* 1839 * The BIOS can leave the PHY in some weird state 1840 * where it doesn't fully power down some parts. 1841 * Disable the asserts until the PHY has been fully 1842 * reset (ie. the power well has been disabled at 1843 * least once). 1844 */ 1845 if (!dev_priv->chv_phy_assert[phy]) 1846 return; 1847 1848 if (ch == DPIO_CH0) 1849 reg = _CHV_CMN_DW0_CH0; 1850 else 1851 reg = _CHV_CMN_DW6_CH1; 1852 1853 vlv_dpio_get(dev_priv); 1854 val = vlv_dpio_read(dev_priv, pipe, reg); 1855 vlv_dpio_put(dev_priv); 1856 1857 /* 1858 * This assumes !override is only used when the port is disabled. 1859 * All lanes should power down even without the override when 1860 * the port is disabled. 1861 */ 1862 if (!override || mask == 0xf) { 1863 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1864 /* 1865 * If CH1 common lane is not active anymore 1866 * (eg. for pipe B DPLL) the entire channel will 1867 * shut down, which causes the common lane registers 1868 * to read as 0. That means we can't actually check 1869 * the lane power down status bits, but as the entire 1870 * register reads as 0 it's a good indication that the 1871 * channel is indeed entirely powered down.
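 * (The readout below then shifts the per-channel status into place and
 * masks off everything except DPIO_ALLDL_POWERDOWN/DPIO_ANYDL_POWERDOWN,
 * so @actual can be compared 1:1 against the @expected value built here.)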
1872 */ 1873 if (ch == DPIO_CH1 && val == 0) 1874 expected = 0; 1875 } else if (mask != 0x0) { 1876 expected = DPIO_ANYDL_POWERDOWN; 1877 } else { 1878 expected = 0; 1879 } 1880 1881 if (ch == DPIO_CH0) 1882 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; 1883 else 1884 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; 1885 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1886 1887 drm_WARN(&dev_priv->drm, actual != expected, 1888 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", 1889 !!(actual & DPIO_ALLDL_POWERDOWN), 1890 !!(actual & DPIO_ANYDL_POWERDOWN), 1891 !!(expected & DPIO_ALLDL_POWERDOWN), 1892 !!(expected & DPIO_ANYDL_POWERDOWN), 1893 reg, val); 1894 } 1895 1896 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1897 enum dpio_channel ch, bool override) 1898 { 1899 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1900 bool was_override; 1901 1902 mutex_lock(&power_domains->lock); 1903 1904 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1905 1906 if (override == was_override) 1907 goto out; 1908 1909 if (override) 1910 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1911 else 1912 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1913 1914 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1915 dev_priv->chv_phy_control); 1916 1917 drm_dbg_kms(&dev_priv->drm, 1918 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", 1919 phy, ch, dev_priv->chv_phy_control); 1920 1921 assert_chv_phy_status(dev_priv); 1922 1923 out: 1924 mutex_unlock(&power_domains->lock); 1925 1926 return was_override; 1927 } 1928 1929 void chv_phy_powergate_lanes(struct intel_encoder *encoder, 1930 bool override, unsigned int mask) 1931 { 1932 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1933 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1934 enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); 1935 enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); 1936 1937 mutex_lock(&power_domains->lock); 1938 1939 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); 1940 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); 1941 1942 if (override) 1943 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1944 else 1945 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1946 1947 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1948 dev_priv->chv_phy_control); 1949 1950 drm_dbg_kms(&dev_priv->drm, 1951 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", 1952 phy, ch, mask, dev_priv->chv_phy_control); 1953 1954 assert_chv_phy_status(dev_priv); 1955 1956 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); 1957 1958 mutex_unlock(&power_domains->lock); 1959 } 1960 1961 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, 1962 struct i915_power_well *power_well) 1963 { 1964 enum pipe pipe = PIPE_A; 1965 bool enabled; 1966 u32 state, ctrl; 1967 1968 vlv_punit_get(dev_priv); 1969 1970 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); 1971 /* 1972 * We only ever set the power-on and power-gate states, anything 1973 * else is unexpected. 
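 * (The cross-check below additionally relies on the PUNIT_REG_DSPSSPM
 * layout: the status (DP_SSS) bits appear to live 16 bits above the
 * matching control (DP_SSC) bits, which is what the ctrl << 16
 * comparison encodes.)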
1974 */ 1975 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && 1976 state != DP_SSS_PWR_GATE(pipe)); 1977 enabled = state == DP_SSS_PWR_ON(pipe); 1978 1979 /* 1980 * A transient state at this point would mean some unexpected party 1981 * is poking at the power controls too. 1982 */ 1983 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); 1984 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); 1985 1986 vlv_punit_put(dev_priv); 1987 1988 return enabled; 1989 } 1990 1991 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, 1992 struct i915_power_well *power_well, 1993 bool enable) 1994 { 1995 enum pipe pipe = PIPE_A; 1996 u32 state; 1997 u32 ctrl; 1998 1999 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); 2000 2001 vlv_punit_get(dev_priv); 2002 2003 #define COND \ 2004 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) 2005 2006 if (COND) 2007 goto out; 2008 2009 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 2010 ctrl &= ~DP_SSC_MASK(pipe); 2011 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); 2012 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); 2013 2014 if (wait_for(COND, 100)) 2015 drm_err(&dev_priv->drm, 2016 "timeout setting power well state %08x (%08x)\n", 2017 state, 2018 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); 2019 2020 #undef COND 2021 2022 out: 2023 vlv_punit_put(dev_priv); 2024 } 2025 2026 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, 2027 struct i915_power_well *power_well) 2028 { 2029 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 2030 dev_priv->chv_phy_control); 2031 } 2032 2033 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, 2034 struct i915_power_well *power_well) 2035 { 2036 chv_set_pipe_power_well(dev_priv, power_well, true); 2037 2038 vlv_display_power_well_init(dev_priv); 2039 } 2040 2041 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, 2042 struct i915_power_well *power_well) 2043 { 2044 vlv_display_power_well_deinit(dev_priv); 2045 2046 chv_set_pipe_power_well(dev_priv, power_well, false); 2047 } 2048 2049 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) 2050 { 2051 return power_domains->async_put_domains[0] | 2052 power_domains->async_put_domains[1]; 2053 } 2054 2055 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2056 2057 static bool 2058 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 2059 { 2060 struct drm_i915_private *i915 = container_of(power_domains, 2061 struct drm_i915_private, 2062 power_domains); 2063 return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] & 2064 power_domains->async_put_domains[1]); 2065 } 2066 2067 static bool 2068 __async_put_domains_state_ok(struct i915_power_domains *power_domains) 2069 { 2070 struct drm_i915_private *i915 = container_of(power_domains, 2071 struct drm_i915_private, 2072 power_domains); 2073 enum intel_display_power_domain domain; 2074 bool err = false; 2075 2076 err |= !assert_async_put_domain_masks_disjoint(power_domains); 2077 err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref != 2078 !!__async_put_domains_mask(power_domains)); 2079 2080 for_each_power_domain(domain, __async_put_domains_mask(power_domains)) 2081 err |= drm_WARN_ON(&i915->drm, 2082 power_domains->domain_use_count[domain] != 1); 2083 2084 return !err; 2085 } 2086 2087 static void print_power_domains(struct i915_power_domains *power_domains, 2088 const char *prefix, u64 
mask) 2089 { 2090 struct drm_i915_private *i915 = container_of(power_domains, 2091 struct drm_i915_private, 2092 power_domains); 2093 enum intel_display_power_domain domain; 2094 2095 drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask)); 2096 for_each_power_domain(domain, mask) 2097 drm_dbg(&i915->drm, "%s use_count %d\n", 2098 intel_display_power_domain_str(domain), 2099 power_domains->domain_use_count[domain]); 2100 } 2101 2102 static void 2103 print_async_put_domains_state(struct i915_power_domains *power_domains) 2104 { 2105 struct drm_i915_private *i915 = container_of(power_domains, 2106 struct drm_i915_private, 2107 power_domains); 2108 2109 drm_dbg(&i915->drm, "async_put_wakeref %u\n", 2110 power_domains->async_put_wakeref); 2111 2112 print_power_domains(power_domains, "async_put_domains[0]", 2113 power_domains->async_put_domains[0]); 2114 print_power_domains(power_domains, "async_put_domains[1]", 2115 power_domains->async_put_domains[1]); 2116 } 2117 2118 static void 2119 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2120 { 2121 if (!__async_put_domains_state_ok(power_domains)) 2122 print_async_put_domains_state(power_domains); 2123 } 2124 2125 #else 2126 2127 static void 2128 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 2129 { 2130 } 2131 2132 static void 2133 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2134 { 2135 } 2136 2137 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ 2138 2139 static u64 async_put_domains_mask(struct i915_power_domains *power_domains) 2140 { 2141 assert_async_put_domain_masks_disjoint(power_domains); 2142 2143 return __async_put_domains_mask(power_domains); 2144 } 2145 2146 static void 2147 async_put_domains_clear_domain(struct i915_power_domains *power_domains, 2148 enum intel_display_power_domain domain) 2149 { 2150 assert_async_put_domain_masks_disjoint(power_domains); 2151 2152 power_domains->async_put_domains[0] &= ~BIT_ULL(domain); 2153 power_domains->async_put_domains[1] &= ~BIT_ULL(domain); 2154 } 2155 2156 static bool 2157 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, 2158 enum intel_display_power_domain domain) 2159 { 2160 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2161 bool ret = false; 2162 2163 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) 2164 goto out_verify; 2165 2166 async_put_domains_clear_domain(power_domains, domain); 2167 2168 ret = true; 2169 2170 if (async_put_domains_mask(power_domains)) 2171 goto out_verify; 2172 2173 cancel_delayed_work(&power_domains->async_put_work); 2174 intel_runtime_pm_put_raw(&dev_priv->runtime_pm, 2175 fetch_and_zero(&power_domains->async_put_wakeref)); 2176 out_verify: 2177 verify_async_put_domains_state(power_domains); 2178 2179 return ret; 2180 } 2181 2182 static void 2183 __intel_display_power_get_domain(struct drm_i915_private *dev_priv, 2184 enum intel_display_power_domain domain) 2185 { 2186 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2187 struct i915_power_well *power_well; 2188 2189 if (intel_display_power_grab_async_put_ref(dev_priv, domain)) 2190 return; 2191 2192 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) 2193 intel_power_well_get(dev_priv, power_well); 2194 2195 power_domains->domain_use_count[domain]++; 2196 } 2197 2198 /** 2199 * intel_display_power_get - grab a power domain reference 2200 * @dev_priv: i915 device instance 2201 * @domain: power domain to reference 2202 * 2203 * 
This function grabs a power domain reference for @domain and ensures that the 2204 * power domain and all its parents are powered up. Therefore users should only 2205 * grab a reference to the innermost power domain they need. 2206 * 2207 * Any power domain reference obtained by this function must have a symmetric 2208 * call to intel_display_power_put() to release the reference again. 2209 */ 2210 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, 2211 enum intel_display_power_domain domain) 2212 { 2213 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2214 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 2215 2216 mutex_lock(&power_domains->lock); 2217 __intel_display_power_get_domain(dev_priv, domain); 2218 mutex_unlock(&power_domains->lock); 2219 2220 return wakeref; 2221 } 2222 2223 /** 2224 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain 2225 * @dev_priv: i915 device instance 2226 * @domain: power domain to reference 2227 * 2228 * This function grabs a power domain reference for @domain, but only if 2229 * @domain (and hence all its parent power domains) is already enabled. If it 2230 * is not, no reference is grabbed and 0 is returned. 2231 * 2232 * Any power domain reference obtained by this function must have a symmetric 2233 * call to intel_display_power_put() to release the reference again. 2234 */ 2235 intel_wakeref_t 2236 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, 2237 enum intel_display_power_domain domain) 2238 { 2239 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2240 intel_wakeref_t wakeref; 2241 bool is_enabled; 2242 2243 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); 2244 if (!wakeref) 2245 return 0; 2246 2247 mutex_lock(&power_domains->lock); 2248 2249 if (__intel_display_power_is_enabled(dev_priv, domain)) { 2250 __intel_display_power_get_domain(dev_priv, domain); 2251 is_enabled = true; 2252 } else { 2253 is_enabled = false; 2254 } 2255 2256 mutex_unlock(&power_domains->lock); 2257 2258 if (!is_enabled) { 2259 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2260 wakeref = 0; 2261 } 2262 2263 return wakeref; 2264 } 2265 2266 static void 2267 __intel_display_power_put_domain(struct drm_i915_private *dev_priv, 2268 enum intel_display_power_domain domain) 2269 { 2270 struct i915_power_domains *power_domains; 2271 struct i915_power_well *power_well; 2272 const char *name = intel_display_power_domain_str(domain); 2273 2274 power_domains = &dev_priv->power_domains; 2275 2276 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], 2277 "Use count on domain %s is already zero\n", 2278 name); 2279 drm_WARN(&dev_priv->drm, 2280 async_put_domains_mask(power_domains) & BIT_ULL(domain), 2281 "Async disabling of domain %s is pending\n", 2282 name); 2283 2284 power_domains->domain_use_count[domain]--; 2285 2286 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) 2287 intel_power_well_put(dev_priv, power_well); 2288 } 2289 2290 static void __intel_display_power_put(struct drm_i915_private *dev_priv, 2291 enum intel_display_power_domain domain) 2292 { 2293 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2294 2295 mutex_lock(&power_domains->lock); 2296 __intel_display_power_put_domain(dev_priv, domain); 2297 mutex_unlock(&power_domains->lock); 2298 } 2299 2300 static void 2301 queue_async_put_domains_work(struct i915_power_domains
*power_domains, 2302 intel_wakeref_t wakeref) 2303 { 2304 struct drm_i915_private *i915 = container_of(power_domains, 2305 struct drm_i915_private, 2306 power_domains); 2307 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); 2308 power_domains->async_put_wakeref = wakeref; 2309 drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, 2310 &power_domains->async_put_work, 2311 msecs_to_jiffies(100))); 2312 } 2313 2314 static void 2315 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) 2316 { 2317 struct drm_i915_private *dev_priv = 2318 container_of(power_domains, struct drm_i915_private, 2319 power_domains); 2320 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 2321 enum intel_display_power_domain domain; 2322 intel_wakeref_t wakeref; 2323 2324 /* 2325 * The caller must already hold a raw wakeref; upgrade that to a proper 2326 * wakeref to make the state checker happy about the HW access during 2327 * power well disabling. 2328 */ 2329 assert_rpm_raw_wakeref_held(rpm); 2330 wakeref = intel_runtime_pm_get(rpm); 2331 2332 for_each_power_domain(domain, mask) { 2333 /* Clear before put, so put's sanity check is happy. */ 2334 async_put_domains_clear_domain(power_domains, domain); 2335 __intel_display_power_put_domain(dev_priv, domain); 2336 } 2337 2338 intel_runtime_pm_put(rpm, wakeref); 2339 } 2340 2341 static void 2342 intel_display_power_put_async_work(struct work_struct *work) 2343 { 2344 struct drm_i915_private *dev_priv = 2345 container_of(work, struct drm_i915_private, 2346 power_domains.async_put_work.work); 2347 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2348 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 2349 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); 2350 intel_wakeref_t old_work_wakeref = 0; 2351 2352 mutex_lock(&power_domains->lock); 2353 2354 /* 2355 * Bail out if all the domain refs pending to be released were grabbed 2356 * by subsequent gets or a flush_work. 2357 */ 2358 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2359 if (!old_work_wakeref) 2360 goto out_verify; 2361 2362 release_async_put_domains(power_domains, 2363 power_domains->async_put_domains[0]); 2364 2365 /* Requeue the work if more domains were async put meanwhile. */ 2366 if (power_domains->async_put_domains[1]) { 2367 power_domains->async_put_domains[0] = 2368 fetch_and_zero(&power_domains->async_put_domains[1]); 2369 queue_async_put_domains_work(power_domains, 2370 fetch_and_zero(&new_work_wakeref)); 2371 } else { 2372 /* 2373 * Cancel the work that got queued after this one got dequeued, 2374 * since here we released the corresponding async-put reference. 2375 */ 2376 cancel_delayed_work(&power_domains->async_put_work); 2377 } 2378 2379 out_verify: 2380 verify_async_put_domains_state(power_domains); 2381 2382 mutex_unlock(&power_domains->lock); 2383 2384 if (old_work_wakeref) 2385 intel_runtime_pm_put_raw(rpm, old_work_wakeref); 2386 if (new_work_wakeref) 2387 intel_runtime_pm_put_raw(rpm, new_work_wakeref); 2388 } 2389 2390 /** 2391 * intel_display_power_put_async - release a power domain reference asynchronously 2392 * @i915: i915 device instance 2393 * @domain: power domain to reference 2394 * @wakeref: wakeref acquired for the reference that is being released 2395 * 2396 * This function drops the power domain reference obtained by 2397 * intel_display_power_get*() and schedules a work to power down the 2398 * corresponding hardware block if this is the last reference.
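 *
 * A minimal usage sketch (hypothetical caller; AUX_A is only an example
 * domain, and intel_display_power_put_async() is the wrapper around this
 * function declared in intel_display_power.h):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
 *	... program the hardware behind the AUX_A well ...
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);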
2399 */ 2400 void __intel_display_power_put_async(struct drm_i915_private *i915, 2401 enum intel_display_power_domain domain, 2402 intel_wakeref_t wakeref) 2403 { 2404 struct i915_power_domains *power_domains = &i915->power_domains; 2405 struct intel_runtime_pm *rpm = &i915->runtime_pm; 2406 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); 2407 2408 mutex_lock(&power_domains->lock); 2409 2410 if (power_domains->domain_use_count[domain] > 1) { 2411 __intel_display_power_put_domain(i915, domain); 2412 2413 goto out_verify; 2414 } 2415 2416 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); 2417 2418 /* Let a pending work requeue itself or queue a new one. */ 2419 if (power_domains->async_put_wakeref) { 2420 power_domains->async_put_domains[1] |= BIT_ULL(domain); 2421 } else { 2422 power_domains->async_put_domains[0] |= BIT_ULL(domain); 2423 queue_async_put_domains_work(power_domains, 2424 fetch_and_zero(&work_wakeref)); 2425 } 2426 2427 out_verify: 2428 verify_async_put_domains_state(power_domains); 2429 2430 mutex_unlock(&power_domains->lock); 2431 2432 if (work_wakeref) 2433 intel_runtime_pm_put_raw(rpm, work_wakeref); 2434 2435 intel_runtime_pm_put(rpm, wakeref); 2436 } 2437 2438 /** 2439 * intel_display_power_flush_work - flushes the async display power disabling work 2440 * @i915: i915 device instance 2441 * 2442 * Flushes any pending work that was scheduled by a preceding 2443 * intel_display_power_put_async() call, completing the disabling of the 2444 * corresponding power domains. 2445 * 2446 * Note that the work handler function may still be running after this 2447 * function returns; to ensure that the work handler isn't running use 2448 * intel_display_power_flush_work_sync() instead. 2449 */ 2450 void intel_display_power_flush_work(struct drm_i915_private *i915) 2451 { 2452 struct i915_power_domains *power_domains = &i915->power_domains; 2453 intel_wakeref_t work_wakeref; 2454 2455 mutex_lock(&power_domains->lock); 2456 2457 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2458 if (!work_wakeref) 2459 goto out_verify; 2460 2461 release_async_put_domains(power_domains, 2462 async_put_domains_mask(power_domains)); 2463 cancel_delayed_work(&power_domains->async_put_work); 2464 2465 out_verify: 2466 verify_async_put_domains_state(power_domains); 2467 2468 mutex_unlock(&power_domains->lock); 2469 2470 if (work_wakeref) 2471 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); 2472 } 2473 2474 /** 2475 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work 2476 * @i915: i915 device instance 2477 * 2478 * Like intel_display_power_flush_work(), but also ensure that the work 2479 * handler function is not running any more when this function returns. 
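 *
 * A minimal (hypothetical) teardown sketch, pairing the flush with a
 * preceding asynchronous put:
 *
 *	intel_display_power_put_async(i915, domain, wakeref);
 *	...
 *	intel_display_power_flush_work_sync(i915);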
2480 */ 2481 static void 2482 intel_display_power_flush_work_sync(struct drm_i915_private *i915) 2483 { 2484 struct i915_power_domains *power_domains = &i915->power_domains; 2485 2486 intel_display_power_flush_work(i915); 2487 cancel_delayed_work_sync(&power_domains->async_put_work); 2488 2489 verify_async_put_domains_state(power_domains); 2490 2491 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); 2492 } 2493 2494 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2495 /** 2496 * intel_display_power_put - release a power domain reference 2497 * @dev_priv: i915 device instance 2498 * @domain: power domain to reference 2499 * @wakeref: wakeref acquired for the reference that is being released 2500 * 2501 * This function drops the power domain reference obtained by 2502 * intel_display_power_get() and might power down the corresponding hardware 2503 * block right away if this is the last reference. 2504 */ 2505 void intel_display_power_put(struct drm_i915_private *dev_priv, 2506 enum intel_display_power_domain domain, 2507 intel_wakeref_t wakeref) 2508 { 2509 __intel_display_power_put(dev_priv, domain); 2510 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2511 } 2512 #else 2513 /** 2514 * intel_display_power_put_unchecked - release an unchecked power domain reference 2515 * @dev_priv: i915 device instance 2516 * @domain: power domain to reference 2517 * 2518 * This function drops the power domain reference obtained by 2519 * intel_display_power_get() and might power down the corresponding hardware 2520 * block right away if this is the last reference. 2521 * 2522 * This function is only for the power domain code's internal use to suppress wakeref 2523 * tracking when the corresponding debug kconfig option is disabled and should not 2524 * be used otherwise.
2525 */ 2526 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 2527 enum intel_display_power_domain domain) 2528 { 2529 __intel_display_power_put(dev_priv, domain); 2530 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); 2531 } 2532 #endif 2533 2534 void 2535 intel_display_power_get_in_set(struct drm_i915_private *i915, 2536 struct intel_display_power_domain_set *power_domain_set, 2537 enum intel_display_power_domain domain) 2538 { 2539 intel_wakeref_t __maybe_unused wf; 2540 2541 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2542 2543 wf = intel_display_power_get(i915, domain); 2544 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2545 power_domain_set->wakerefs[domain] = wf; 2546 #endif 2547 power_domain_set->mask |= BIT_ULL(domain); 2548 } 2549 2550 bool 2551 intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, 2552 struct intel_display_power_domain_set *power_domain_set, 2553 enum intel_display_power_domain domain) 2554 { 2555 intel_wakeref_t wf; 2556 2557 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2558 2559 wf = intel_display_power_get_if_enabled(i915, domain); 2560 if (!wf) 2561 return false; 2562 2563 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2564 power_domain_set->wakerefs[domain] = wf; 2565 #endif 2566 power_domain_set->mask |= BIT_ULL(domain); 2567 2568 return true; 2569 } 2570 2571 void 2572 intel_display_power_put_mask_in_set(struct drm_i915_private *i915, 2573 struct intel_display_power_domain_set *power_domain_set, 2574 u64 mask) 2575 { 2576 enum intel_display_power_domain domain; 2577 2578 drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask); 2579 2580 for_each_power_domain(domain, mask) { 2581 intel_wakeref_t __maybe_unused wf = -1; 2582 2583 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2584 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); 2585 #endif 2586 intel_display_power_put(i915, domain, wf); 2587 power_domain_set->mask &= ~BIT_ULL(domain); 2588 } 2589 } 2590 2591 #define I830_PIPES_POWER_DOMAINS ( \ 2592 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2593 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2594 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2595 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2596 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2597 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2598 BIT_ULL(POWER_DOMAIN_INIT)) 2599 2600 #define VLV_DISPLAY_POWER_DOMAINS ( \ 2601 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2602 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2603 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2604 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2605 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2606 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2607 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2608 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2609 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2610 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2611 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2612 BIT_ULL(POWER_DOMAIN_VGA) | \ 2613 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2614 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2615 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2616 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2617 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2618 BIT_ULL(POWER_DOMAIN_INIT)) 2619 2620 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2621 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2622 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2623 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2624 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2625 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2626 BIT_ULL(POWER_DOMAIN_INIT)) 2627 2628 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ 2629 
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2630 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2631 BIT_ULL(POWER_DOMAIN_INIT)) 2632 2633 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ 2634 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2635 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2636 BIT_ULL(POWER_DOMAIN_INIT)) 2637 2638 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ 2639 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2640 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2641 BIT_ULL(POWER_DOMAIN_INIT)) 2642 2643 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ 2644 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2645 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2646 BIT_ULL(POWER_DOMAIN_INIT)) 2647 2648 #define CHV_DISPLAY_POWER_DOMAINS ( \ 2649 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2650 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2651 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2652 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2653 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2654 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2655 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2656 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2657 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2658 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2659 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2660 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2661 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2662 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2663 BIT_ULL(POWER_DOMAIN_VGA) | \ 2664 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2665 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2666 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2667 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2668 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2669 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2670 BIT_ULL(POWER_DOMAIN_INIT)) 2671 2672 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2673 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2674 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2675 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2676 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2677 BIT_ULL(POWER_DOMAIN_INIT)) 2678 2679 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ 2680 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2681 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2682 BIT_ULL(POWER_DOMAIN_INIT)) 2683 2684 #define HSW_DISPLAY_POWER_DOMAINS ( \ 2685 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2686 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2687 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2688 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2689 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2690 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2691 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2692 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2693 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2694 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2695 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2696 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2697 BIT_ULL(POWER_DOMAIN_VGA) | \ 2698 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2699 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2700 BIT_ULL(POWER_DOMAIN_INIT)) 2701 2702 #define BDW_DISPLAY_POWER_DOMAINS ( \ 2703 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2704 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2705 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2706 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2707 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2708 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2709 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2710 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2711 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2712 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2713 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2714 BIT_ULL(POWER_DOMAIN_VGA) | \ 2715 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2716 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2717 BIT_ULL(POWER_DOMAIN_INIT)) 2718 2719 #define 
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2720 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2721 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2722 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2723 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2724 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2725 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2726 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2727 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2728 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2729 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2730 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2731 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2732 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2733 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2734 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2735 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2736 BIT_ULL(POWER_DOMAIN_VGA) | \ 2737 BIT_ULL(POWER_DOMAIN_INIT)) 2738 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ 2739 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2740 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ 2741 BIT_ULL(POWER_DOMAIN_INIT)) 2742 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2743 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2744 BIT_ULL(POWER_DOMAIN_INIT)) 2745 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2746 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2747 BIT_ULL(POWER_DOMAIN_INIT)) 2748 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ 2749 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2750 BIT_ULL(POWER_DOMAIN_INIT)) 2751 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2752 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2753 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2754 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2755 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2756 BIT_ULL(POWER_DOMAIN_INIT)) 2757 2758 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2759 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2760 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2761 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2762 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2763 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2764 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2765 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2766 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2767 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2768 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2769 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2770 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2771 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2772 BIT_ULL(POWER_DOMAIN_VGA) | \ 2773 BIT_ULL(POWER_DOMAIN_INIT)) 2774 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2775 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2776 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2777 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2778 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2779 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2780 BIT_ULL(POWER_DOMAIN_INIT)) 2781 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ 2782 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2783 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2784 BIT_ULL(POWER_DOMAIN_INIT)) 2785 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ 2786 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2787 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2788 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2789 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2790 BIT_ULL(POWER_DOMAIN_INIT)) 2791 2792 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2793 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2794 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2795 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2796 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2797 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2798 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2799 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2800 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2801 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2802 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2803 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 
2804 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2805 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2806 BIT_ULL(POWER_DOMAIN_VGA) | \ 2807 BIT_ULL(POWER_DOMAIN_INIT)) 2808 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ 2809 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2810 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2811 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2812 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2813 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2814 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ 2815 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2816 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2817 BIT_ULL(POWER_DOMAIN_INIT)) 2818 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ 2819 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2820 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2821 BIT_ULL(POWER_DOMAIN_INIT)) 2822 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ 2823 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2824 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2825 BIT_ULL(POWER_DOMAIN_INIT)) 2826 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2827 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2828 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2829 BIT_ULL(POWER_DOMAIN_INIT)) 2830 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2831 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2832 BIT_ULL(POWER_DOMAIN_INIT)) 2833 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2834 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2835 BIT_ULL(POWER_DOMAIN_INIT)) 2836 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2837 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2838 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2839 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2840 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2841 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2842 BIT_ULL(POWER_DOMAIN_INIT)) 2843 2844 /* 2845 * ICL PW_0/PG_0 domains (HW/DMC control): 2846 * - PCI 2847 * - clocks except port PLL 2848 * - central power except FBC 2849 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers 2850 * ICL PW_1/PG_1 domains (HW/DMC control): 2851 * - DBUF function 2852 * - PIPE_A and its planes, except VGA 2853 * - transcoder EDP + PSR 2854 * - transcoder DSI 2855 * - DDI_A 2856 * - FBC 2857 */ 2858 #define ICL_PW_4_POWER_DOMAINS ( \ 2859 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2860 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2861 BIT_ULL(POWER_DOMAIN_INIT)) 2862 /* VDSC/joining */ 2863 #define ICL_PW_3_POWER_DOMAINS ( \ 2864 ICL_PW_4_POWER_DOMAINS | \ 2865 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2866 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2867 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2868 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2869 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2870 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2871 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2872 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2873 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2874 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2875 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2876 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2877 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2878 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2879 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2880 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ 2881 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2882 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2883 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2884 BIT_ULL(POWER_DOMAIN_VGA) | \ 2885 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2886 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2887 BIT_ULL(POWER_DOMAIN_INIT)) 2888 /* 2889 * - transcoder WD 2890 * - KVMR (HW control) 2891 */ 2892 #define ICL_PW_2_POWER_DOMAINS ( \ 2893 ICL_PW_3_POWER_DOMAINS | \ 2894 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2895 BIT_ULL(POWER_DOMAIN_INIT)) 2896 /* 2897 * - KVMR (HW control) 2898 */ 2899 #define 
ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2900 ICL_PW_2_POWER_DOMAINS | \ 2901 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2902 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2903 BIT_ULL(POWER_DOMAIN_DC_OFF) | \ 2904 BIT_ULL(POWER_DOMAIN_INIT)) 2905 2906 #define ICL_DDI_IO_A_POWER_DOMAINS ( \ 2907 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2908 #define ICL_DDI_IO_B_POWER_DOMAINS ( \ 2909 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2910 #define ICL_DDI_IO_C_POWER_DOMAINS ( \ 2911 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2912 #define ICL_DDI_IO_D_POWER_DOMAINS ( \ 2913 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2914 #define ICL_DDI_IO_E_POWER_DOMAINS ( \ 2915 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2916 #define ICL_DDI_IO_F_POWER_DOMAINS ( \ 2917 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2918 2919 #define ICL_AUX_A_IO_POWER_DOMAINS ( \ 2920 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2921 BIT_ULL(POWER_DOMAIN_AUX_A)) 2922 #define ICL_AUX_B_IO_POWER_DOMAINS ( \ 2923 BIT_ULL(POWER_DOMAIN_AUX_B)) 2924 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ 2925 BIT_ULL(POWER_DOMAIN_AUX_C)) 2926 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ 2927 BIT_ULL(POWER_DOMAIN_AUX_D)) 2928 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ 2929 BIT_ULL(POWER_DOMAIN_AUX_E)) 2930 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ 2931 BIT_ULL(POWER_DOMAIN_AUX_F)) 2932 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ 2933 BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) 2934 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ 2935 BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) 2936 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ 2937 BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) 2938 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ 2939 BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) 2940 2941 #define TGL_PW_5_POWER_DOMAINS ( \ 2942 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 2943 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 2944 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 2945 BIT_ULL(POWER_DOMAIN_INIT)) 2946 2947 #define TGL_PW_4_POWER_DOMAINS ( \ 2948 TGL_PW_5_POWER_DOMAINS | \ 2949 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2950 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2951 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2952 BIT_ULL(POWER_DOMAIN_INIT)) 2953 2954 #define TGL_PW_3_POWER_DOMAINS ( \ 2955 TGL_PW_4_POWER_DOMAINS | \ 2956 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2957 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2958 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2959 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 2960 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 2961 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ 2962 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ 2963 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \ 2964 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \ 2965 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 2966 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 2967 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 2968 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 2969 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ 2970 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ 2971 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 2972 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 2973 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 2974 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 2975 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ 2976 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ 2977 BIT_ULL(POWER_DOMAIN_VGA) | \ 2978 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2979 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2980 BIT_ULL(POWER_DOMAIN_INIT)) 2981 2982 #define TGL_PW_2_POWER_DOMAINS ( \ 2983 TGL_PW_3_POWER_DOMAINS | \ 2984 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2985 BIT_ULL(POWER_DOMAIN_INIT)) 2986 2987 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2988 TGL_PW_3_POWER_DOMAINS | \ 2989 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2990 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2991 
BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2992 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2993 BIT_ULL(POWER_DOMAIN_INIT)) 2994 2995 #define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) 2996 #define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) 2997 #define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) 2998 #define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) 2999 #define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5) 3000 #define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6) 3001 3002 #define TGL_AUX_A_IO_POWER_DOMAINS ( \ 3003 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 3004 BIT_ULL(POWER_DOMAIN_AUX_A)) 3005 #define TGL_AUX_B_IO_POWER_DOMAINS ( \ 3006 BIT_ULL(POWER_DOMAIN_AUX_B)) 3007 #define TGL_AUX_C_IO_POWER_DOMAINS ( \ 3008 BIT_ULL(POWER_DOMAIN_AUX_C)) 3009 3010 #define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) 3011 #define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) 3012 #define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) 3013 #define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) 3014 #define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5) 3015 #define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6) 3016 3017 #define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) 3018 #define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) 3019 #define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) 3020 #define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) 3021 #define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5) 3022 #define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6) 3023 3024 #define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ 3025 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3026 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3027 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 3028 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 3029 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ 3030 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ 3031 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 3032 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 3033 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 3034 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 3035 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ 3036 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ 3037 BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) 3038 3039 #define RKL_PW_4_POWER_DOMAINS ( \ 3040 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 3041 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 3042 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 3043 BIT_ULL(POWER_DOMAIN_INIT)) 3044 3045 #define RKL_PW_3_POWER_DOMAINS ( \ 3046 RKL_PW_4_POWER_DOMAINS | \ 3047 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 3048 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 3049 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 3050 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 3051 BIT_ULL(POWER_DOMAIN_VGA) | \ 3052 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 3053 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 3054 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 3055 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3056 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3057 BIT_ULL(POWER_DOMAIN_INIT)) 3058 3059 /* 3060 * There is no PW_2/PG_2 on RKL. 
3061 * 3062 * RKL PW_1/PG_1 domains (under HW/DMC control): 3063 * - DBUF function (note: registers are in PW0) 3064 * - PIPE_A and its planes and VDSC/joining, except VGA 3065 * - transcoder A 3066 * - DDI_A and DDI_B 3067 * - FBC 3068 * 3069 * RKL PW_0/PG_0 domains (under HW/DMC control): 3070 * - PCI 3071 * - clocks except port PLL 3072 * - shared functions: 3073 * * interrupts except pipe interrupts 3074 * * MBus except PIPE_MBUS_DBOX_CTL 3075 * * DBUF registers 3076 * - central power except FBC 3077 * - top-level GTC (DDI-level GTC is in the well associated with the DDI) 3078 */ 3079 3080 #define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3081 RKL_PW_3_POWER_DOMAINS | \ 3082 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3083 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3084 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3085 BIT_ULL(POWER_DOMAIN_INIT)) 3086 3087 /* 3088 * From DG1 onwards the Audio MMIO/VERBS registers lie in the PG0 power well. 3089 */ 3090 #define DG1_PW_3_POWER_DOMAINS ( \ 3091 TGL_PW_4_POWER_DOMAINS | \ 3092 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 3093 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 3094 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 3095 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 3096 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 3097 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3098 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3099 BIT_ULL(POWER_DOMAIN_VGA) | \ 3100 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 3101 BIT_ULL(POWER_DOMAIN_INIT)) 3102 3103 #define DG1_PW_2_POWER_DOMAINS ( \ 3104 DG1_PW_3_POWER_DOMAINS | \ 3105 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 3106 BIT_ULL(POWER_DOMAIN_INIT)) 3107 3108 #define DG1_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3109 DG1_PW_3_POWER_DOMAINS | \ 3110 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 3111 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3112 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3113 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3114 BIT_ULL(POWER_DOMAIN_INIT)) 3115 3116 /* 3117 * XE_LPD Power Domains 3118 * 3119 * Previous platforms required that PG(n-1) be enabled before PG(n). That 3120 * dependency chain turns into a dependency tree on XE_LPD: 3121 * 3122 * PG0 3123 * | 3124 * --PG1-- 3125 * / \ 3126 * PGA --PG2-- 3127 * / | \ 3128 * PGB PGC PGD 3129 * 3130 * Power wells must be enabled from top to bottom and disabled from bottom 3131 * to top. This allows pipes to be power gated independently.
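 *
 * (The common get/put paths above already honour this kind of ordering:
 * __intel_display_power_get_domain() walks the matching wells with
 * for_each_power_domain_well(), while __intel_display_power_put_domain()
 * walks them with for_each_power_domain_well_reverse().)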
3132 */ 3133 3134 #define XELPD_PW_D_POWER_DOMAINS ( \ 3135 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 3136 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 3137 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 3138 BIT_ULL(POWER_DOMAIN_INIT)) 3139 3140 #define XELPD_PW_C_POWER_DOMAINS ( \ 3141 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 3142 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 3143 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 3144 BIT_ULL(POWER_DOMAIN_INIT)) 3145 3146 #define XELPD_PW_B_POWER_DOMAINS ( \ 3147 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 3148 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 3149 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 3150 BIT_ULL(POWER_DOMAIN_INIT)) 3151 3152 #define XELPD_PW_A_POWER_DOMAINS ( \ 3153 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 3154 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 3155 BIT_ULL(POWER_DOMAIN_INIT)) 3156 3157 #define XELPD_PW_2_POWER_DOMAINS ( \ 3158 XELPD_PW_B_POWER_DOMAINS | \ 3159 XELPD_PW_C_POWER_DOMAINS | \ 3160 XELPD_PW_D_POWER_DOMAINS | \ 3161 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 3162 BIT_ULL(POWER_DOMAIN_VGA) | \ 3163 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 3164 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \ 3165 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \ 3166 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 3167 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 3168 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ 3169 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ 3170 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 3171 BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \ 3172 BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \ 3173 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3174 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3175 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 3176 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 3177 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 3178 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 3179 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 3180 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 3181 BIT_ULL(POWER_DOMAIN_INIT)) 3182 3183 /* 3184 * XELPD PW_1/PG_1 domains (under HW/DMC control): 3185 * - DBUF function (registers are in PW0) 3186 * - Transcoder A 3187 * - DDI_A and DDI_B 3188 * 3189 * XELPD PW_0/PG_0 domains (under HW/DMC control): 3190 * - PCI 3191 * - Clocks except port PLL 3192 * - Shared functions: 3193 * * interrupts except pipe interrupts 3194 * * MBus except PIPE_MBUS_DBOX_CTL 3195 * * DBUF registers 3196 * - Central power except FBC 3197 * - Top-level GTC (DDI-level GTC is in the well associated with the DDI) 3198 */ 3199 3200 #define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3201 XELPD_PW_2_POWER_DOMAINS | \ 3202 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 3203 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3204 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3205 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3206 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 3207 BIT_ULL(POWER_DOMAIN_INIT)) 3208 3209 #define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) 3210 #define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) 3211 #define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) 3212 #define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) 3213 #define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) 3214 #define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) 3215 3216 #define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) 3217 #define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) 3218 #define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) 3219 #define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) 3220 3221 #define
XELPD_DDI_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD) 3222 #define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD) 3223 #define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) 3224 #define XELPD_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) 3225 #define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) 3226 #define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) 3227 3228 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 3229 .sync_hw = i9xx_power_well_sync_hw_noop, 3230 .enable = i9xx_always_on_power_well_noop, 3231 .disable = i9xx_always_on_power_well_noop, 3232 .is_enabled = i9xx_always_on_power_well_enabled, 3233 }; 3234 3235 static const struct i915_power_well_ops chv_pipe_power_well_ops = { 3236 .sync_hw = chv_pipe_power_well_sync_hw, 3237 .enable = chv_pipe_power_well_enable, 3238 .disable = chv_pipe_power_well_disable, 3239 .is_enabled = chv_pipe_power_well_enabled, 3240 }; 3241 3242 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { 3243 .sync_hw = i9xx_power_well_sync_hw_noop, 3244 .enable = chv_dpio_cmn_power_well_enable, 3245 .disable = chv_dpio_cmn_power_well_disable, 3246 .is_enabled = vlv_power_well_enabled, 3247 }; 3248 3249 static const struct i915_power_well_desc i9xx_always_on_power_well[] = { 3250 { 3251 .name = "always-on", 3252 .always_on = true, 3253 .domains = POWER_DOMAIN_MASK, 3254 .ops = &i9xx_always_on_power_well_ops, 3255 .id = DISP_PW_ID_NONE, 3256 }, 3257 }; 3258 3259 static const struct i915_power_well_ops i830_pipes_power_well_ops = { 3260 .sync_hw = i830_pipes_power_well_sync_hw, 3261 .enable = i830_pipes_power_well_enable, 3262 .disable = i830_pipes_power_well_disable, 3263 .is_enabled = i830_pipes_power_well_enabled, 3264 }; 3265 3266 static const struct i915_power_well_desc i830_power_wells[] = { 3267 { 3268 .name = "always-on", 3269 .always_on = true, 3270 .domains = POWER_DOMAIN_MASK, 3271 .ops = &i9xx_always_on_power_well_ops, 3272 .id = DISP_PW_ID_NONE, 3273 }, 3274 { 3275 .name = "pipes", 3276 .domains = I830_PIPES_POWER_DOMAINS, 3277 .ops = &i830_pipes_power_well_ops, 3278 .id = DISP_PW_ID_NONE, 3279 }, 3280 }; 3281 3282 static const struct i915_power_well_ops hsw_power_well_ops = { 3283 .sync_hw = hsw_power_well_sync_hw, 3284 .enable = hsw_power_well_enable, 3285 .disable = hsw_power_well_disable, 3286 .is_enabled = hsw_power_well_enabled, 3287 }; 3288 3289 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { 3290 .sync_hw = i9xx_power_well_sync_hw_noop, 3291 .enable = gen9_dc_off_power_well_enable, 3292 .disable = gen9_dc_off_power_well_disable, 3293 .is_enabled = gen9_dc_off_power_well_enabled, 3294 }; 3295 3296 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { 3297 .sync_hw = i9xx_power_well_sync_hw_noop, 3298 .enable = bxt_dpio_cmn_power_well_enable, 3299 .disable = bxt_dpio_cmn_power_well_disable, 3300 .is_enabled = bxt_dpio_cmn_power_well_enabled, 3301 }; 3302 3303 static const struct i915_power_well_regs hsw_power_well_regs = { 3304 .bios = HSW_PWR_WELL_CTL1, 3305 .driver = HSW_PWR_WELL_CTL2, 3306 .kvmr = HSW_PWR_WELL_CTL3, 3307 .debug = HSW_PWR_WELL_CTL4, 3308 }; 3309 3310 static const struct i915_power_well_desc hsw_power_wells[] = { 3311 { 3312 .name = "always-on", 3313 .always_on = true, 3314 .domains = POWER_DOMAIN_MASK, 3315 .ops = &i9xx_always_on_power_well_ops, 3316 .id = DISP_PW_ID_NONE, 3317 }, 3318 { 3319 .name = 
"display", 3320 .domains = HSW_DISPLAY_POWER_DOMAINS, 3321 .ops = &hsw_power_well_ops, 3322 .id = HSW_DISP_PW_GLOBAL, 3323 { 3324 .hsw.regs = &hsw_power_well_regs, 3325 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3326 .hsw.has_vga = true, 3327 }, 3328 }, 3329 }; 3330 3331 static const struct i915_power_well_desc bdw_power_wells[] = { 3332 { 3333 .name = "always-on", 3334 .always_on = true, 3335 .domains = POWER_DOMAIN_MASK, 3336 .ops = &i9xx_always_on_power_well_ops, 3337 .id = DISP_PW_ID_NONE, 3338 }, 3339 { 3340 .name = "display", 3341 .domains = BDW_DISPLAY_POWER_DOMAINS, 3342 .ops = &hsw_power_well_ops, 3343 .id = HSW_DISP_PW_GLOBAL, 3344 { 3345 .hsw.regs = &hsw_power_well_regs, 3346 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3347 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3348 .hsw.has_vga = true, 3349 }, 3350 }, 3351 }; 3352 3353 static const struct i915_power_well_ops vlv_display_power_well_ops = { 3354 .sync_hw = i9xx_power_well_sync_hw_noop, 3355 .enable = vlv_display_power_well_enable, 3356 .disable = vlv_display_power_well_disable, 3357 .is_enabled = vlv_power_well_enabled, 3358 }; 3359 3360 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { 3361 .sync_hw = i9xx_power_well_sync_hw_noop, 3362 .enable = vlv_dpio_cmn_power_well_enable, 3363 .disable = vlv_dpio_cmn_power_well_disable, 3364 .is_enabled = vlv_power_well_enabled, 3365 }; 3366 3367 static const struct i915_power_well_ops vlv_dpio_power_well_ops = { 3368 .sync_hw = i9xx_power_well_sync_hw_noop, 3369 .enable = vlv_power_well_enable, 3370 .disable = vlv_power_well_disable, 3371 .is_enabled = vlv_power_well_enabled, 3372 }; 3373 3374 static const struct i915_power_well_desc vlv_power_wells[] = { 3375 { 3376 .name = "always-on", 3377 .always_on = true, 3378 .domains = POWER_DOMAIN_MASK, 3379 .ops = &i9xx_always_on_power_well_ops, 3380 .id = DISP_PW_ID_NONE, 3381 }, 3382 { 3383 .name = "display", 3384 .domains = VLV_DISPLAY_POWER_DOMAINS, 3385 .ops = &vlv_display_power_well_ops, 3386 .id = VLV_DISP_PW_DISP2D, 3387 { 3388 .vlv.idx = PUNIT_PWGT_IDX_DISP2D, 3389 }, 3390 }, 3391 { 3392 .name = "dpio-tx-b-01", 3393 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3394 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3395 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3396 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3397 .ops = &vlv_dpio_power_well_ops, 3398 .id = DISP_PW_ID_NONE, 3399 { 3400 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, 3401 }, 3402 }, 3403 { 3404 .name = "dpio-tx-b-23", 3405 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3406 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3407 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3408 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3409 .ops = &vlv_dpio_power_well_ops, 3410 .id = DISP_PW_ID_NONE, 3411 { 3412 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, 3413 }, 3414 }, 3415 { 3416 .name = "dpio-tx-c-01", 3417 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3418 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3419 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3420 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3421 .ops = &vlv_dpio_power_well_ops, 3422 .id = DISP_PW_ID_NONE, 3423 { 3424 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, 3425 }, 3426 }, 3427 { 3428 .name = "dpio-tx-c-23", 3429 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3430 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3431 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3432 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3433 .ops = &vlv_dpio_power_well_ops, 3434 .id = DISP_PW_ID_NONE, 3435 { 3436 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, 3437 }, 3438 }, 3439 { 3440 .name = 
"dpio-common", 3441 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, 3442 .ops = &vlv_dpio_cmn_power_well_ops, 3443 .id = VLV_DISP_PW_DPIO_CMN_BC, 3444 { 3445 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 3446 }, 3447 }, 3448 }; 3449 3450 static const struct i915_power_well_desc chv_power_wells[] = { 3451 { 3452 .name = "always-on", 3453 .always_on = true, 3454 .domains = POWER_DOMAIN_MASK, 3455 .ops = &i9xx_always_on_power_well_ops, 3456 .id = DISP_PW_ID_NONE, 3457 }, 3458 { 3459 .name = "display", 3460 /* 3461 * Pipe A power well is the new disp2d well. Pipe B and C 3462 * power wells don't actually exist. Pipe A power well is 3463 * required for any pipe to work. 3464 */ 3465 .domains = CHV_DISPLAY_POWER_DOMAINS, 3466 .ops = &chv_pipe_power_well_ops, 3467 .id = DISP_PW_ID_NONE, 3468 }, 3469 { 3470 .name = "dpio-common-bc", 3471 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, 3472 .ops = &chv_dpio_cmn_power_well_ops, 3473 .id = VLV_DISP_PW_DPIO_CMN_BC, 3474 { 3475 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 3476 }, 3477 }, 3478 { 3479 .name = "dpio-common-d", 3480 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, 3481 .ops = &chv_dpio_cmn_power_well_ops, 3482 .id = CHV_DISP_PW_DPIO_CMN_D, 3483 { 3484 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, 3485 }, 3486 }, 3487 }; 3488 3489 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 3490 enum i915_power_well_id power_well_id) 3491 { 3492 struct i915_power_well *power_well; 3493 bool ret; 3494 3495 power_well = lookup_power_well(dev_priv, power_well_id); 3496 ret = power_well->desc->ops->is_enabled(dev_priv, power_well); 3497 3498 return ret; 3499 } 3500 3501 static const struct i915_power_well_desc skl_power_wells[] = { 3502 { 3503 .name = "always-on", 3504 .always_on = true, 3505 .domains = POWER_DOMAIN_MASK, 3506 .ops = &i9xx_always_on_power_well_ops, 3507 .id = DISP_PW_ID_NONE, 3508 }, 3509 { 3510 .name = "power well 1", 3511 /* Handled by the DMC firmware */ 3512 .always_on = true, 3513 .domains = 0, 3514 .ops = &hsw_power_well_ops, 3515 .id = SKL_DISP_PW_1, 3516 { 3517 .hsw.regs = &hsw_power_well_regs, 3518 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3519 .hsw.has_fuses = true, 3520 }, 3521 }, 3522 { 3523 .name = "MISC IO power well", 3524 /* Handled by the DMC firmware */ 3525 .always_on = true, 3526 .domains = 0, 3527 .ops = &hsw_power_well_ops, 3528 .id = SKL_DISP_PW_MISC_IO, 3529 { 3530 .hsw.regs = &hsw_power_well_regs, 3531 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, 3532 }, 3533 }, 3534 { 3535 .name = "DC off", 3536 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, 3537 .ops = &gen9_dc_off_power_well_ops, 3538 .id = SKL_DISP_DC_OFF, 3539 }, 3540 { 3541 .name = "power well 2", 3542 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3543 .ops = &hsw_power_well_ops, 3544 .id = SKL_DISP_PW_2, 3545 { 3546 .hsw.regs = &hsw_power_well_regs, 3547 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3548 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3549 .hsw.has_vga = true, 3550 .hsw.has_fuses = true, 3551 }, 3552 }, 3553 { 3554 .name = "DDI A/E IO power well", 3555 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, 3556 .ops = &hsw_power_well_ops, 3557 .id = DISP_PW_ID_NONE, 3558 { 3559 .hsw.regs = &hsw_power_well_regs, 3560 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, 3561 }, 3562 }, 3563 { 3564 .name = "DDI B IO power well", 3565 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3566 .ops = &hsw_power_well_ops, 3567 .id = DISP_PW_ID_NONE, 3568 { 3569 .hsw.regs = &hsw_power_well_regs, 3570 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3571 }, 3572 }, 3573 { 3574 .name = "DDI C IO power well", 3575 .domains = 
SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3576 .ops = &hsw_power_well_ops, 3577 .id = DISP_PW_ID_NONE, 3578 { 3579 .hsw.regs = &hsw_power_well_regs, 3580 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3581 }, 3582 }, 3583 { 3584 .name = "DDI D IO power well", 3585 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, 3586 .ops = &hsw_power_well_ops, 3587 .id = DISP_PW_ID_NONE, 3588 { 3589 .hsw.regs = &hsw_power_well_regs, 3590 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 3591 }, 3592 }, 3593 }; 3594 3595 static const struct i915_power_well_desc bxt_power_wells[] = { 3596 { 3597 .name = "always-on", 3598 .always_on = true, 3599 .domains = POWER_DOMAIN_MASK, 3600 .ops = &i9xx_always_on_power_well_ops, 3601 .id = DISP_PW_ID_NONE, 3602 }, 3603 { 3604 .name = "power well 1", 3605 /* Handled by the DMC firmware */ 3606 .always_on = true, 3607 .domains = 0, 3608 .ops = &hsw_power_well_ops, 3609 .id = SKL_DISP_PW_1, 3610 { 3611 .hsw.regs = &hsw_power_well_regs, 3612 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3613 .hsw.has_fuses = true, 3614 }, 3615 }, 3616 { 3617 .name = "DC off", 3618 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, 3619 .ops = &gen9_dc_off_power_well_ops, 3620 .id = SKL_DISP_DC_OFF, 3621 }, 3622 { 3623 .name = "power well 2", 3624 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3625 .ops = &hsw_power_well_ops, 3626 .id = SKL_DISP_PW_2, 3627 { 3628 .hsw.regs = &hsw_power_well_regs, 3629 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3630 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3631 .hsw.has_vga = true, 3632 .hsw.has_fuses = true, 3633 }, 3634 }, 3635 { 3636 .name = "dpio-common-a", 3637 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, 3638 .ops = &bxt_dpio_cmn_power_well_ops, 3639 .id = BXT_DISP_PW_DPIO_CMN_A, 3640 { 3641 .bxt.phy = DPIO_PHY1, 3642 }, 3643 }, 3644 { 3645 .name = "dpio-common-bc", 3646 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, 3647 .ops = &bxt_dpio_cmn_power_well_ops, 3648 .id = VLV_DISP_PW_DPIO_CMN_BC, 3649 { 3650 .bxt.phy = DPIO_PHY0, 3651 }, 3652 }, 3653 }; 3654 3655 static const struct i915_power_well_desc glk_power_wells[] = { 3656 { 3657 .name = "always-on", 3658 .always_on = true, 3659 .domains = POWER_DOMAIN_MASK, 3660 .ops = &i9xx_always_on_power_well_ops, 3661 .id = DISP_PW_ID_NONE, 3662 }, 3663 { 3664 .name = "power well 1", 3665 /* Handled by the DMC firmware */ 3666 .always_on = true, 3667 .domains = 0, 3668 .ops = &hsw_power_well_ops, 3669 .id = SKL_DISP_PW_1, 3670 { 3671 .hsw.regs = &hsw_power_well_regs, 3672 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3673 .hsw.has_fuses = true, 3674 }, 3675 }, 3676 { 3677 .name = "DC off", 3678 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, 3679 .ops = &gen9_dc_off_power_well_ops, 3680 .id = SKL_DISP_DC_OFF, 3681 }, 3682 { 3683 .name = "power well 2", 3684 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3685 .ops = &hsw_power_well_ops, 3686 .id = SKL_DISP_PW_2, 3687 { 3688 .hsw.regs = &hsw_power_well_regs, 3689 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3690 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3691 .hsw.has_vga = true, 3692 .hsw.has_fuses = true, 3693 }, 3694 }, 3695 { 3696 .name = "dpio-common-a", 3697 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, 3698 .ops = &bxt_dpio_cmn_power_well_ops, 3699 .id = BXT_DISP_PW_DPIO_CMN_A, 3700 { 3701 .bxt.phy = DPIO_PHY1, 3702 }, 3703 }, 3704 { 3705 .name = "dpio-common-b", 3706 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, 3707 .ops = &bxt_dpio_cmn_power_well_ops, 3708 .id = VLV_DISP_PW_DPIO_CMN_BC, 3709 { 3710 .bxt.phy = DPIO_PHY0, 3711 }, 3712 }, 3713 { 3714 .name = "dpio-common-c", 3715 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, 3716 .ops = 
&bxt_dpio_cmn_power_well_ops, 3717 .id = GLK_DISP_PW_DPIO_CMN_C, 3718 { 3719 .bxt.phy = DPIO_PHY2, 3720 }, 3721 }, 3722 { 3723 .name = "AUX A", 3724 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, 3725 .ops = &hsw_power_well_ops, 3726 .id = DISP_PW_ID_NONE, 3727 { 3728 .hsw.regs = &hsw_power_well_regs, 3729 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3730 }, 3731 }, 3732 { 3733 .name = "AUX B", 3734 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, 3735 .ops = &hsw_power_well_ops, 3736 .id = DISP_PW_ID_NONE, 3737 { 3738 .hsw.regs = &hsw_power_well_regs, 3739 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3740 }, 3741 }, 3742 { 3743 .name = "AUX C", 3744 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, 3745 .ops = &hsw_power_well_ops, 3746 .id = DISP_PW_ID_NONE, 3747 { 3748 .hsw.regs = &hsw_power_well_regs, 3749 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3750 }, 3751 }, 3752 { 3753 .name = "DDI A IO power well", 3754 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, 3755 .ops = &hsw_power_well_ops, 3756 .id = DISP_PW_ID_NONE, 3757 { 3758 .hsw.regs = &hsw_power_well_regs, 3759 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3760 }, 3761 }, 3762 { 3763 .name = "DDI B IO power well", 3764 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3765 .ops = &hsw_power_well_ops, 3766 .id = DISP_PW_ID_NONE, 3767 { 3768 .hsw.regs = &hsw_power_well_regs, 3769 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3770 }, 3771 }, 3772 { 3773 .name = "DDI C IO power well", 3774 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3775 .ops = &hsw_power_well_ops, 3776 .id = DISP_PW_ID_NONE, 3777 { 3778 .hsw.regs = &hsw_power_well_regs, 3779 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3780 }, 3781 }, 3782 }; 3783 3784 static const struct i915_power_well_ops icl_aux_power_well_ops = { 3785 .sync_hw = hsw_power_well_sync_hw, 3786 .enable = icl_aux_power_well_enable, 3787 .disable = icl_aux_power_well_disable, 3788 .is_enabled = hsw_power_well_enabled, 3789 }; 3790 3791 static const struct i915_power_well_regs icl_aux_power_well_regs = { 3792 .bios = ICL_PWR_WELL_CTL_AUX1, 3793 .driver = ICL_PWR_WELL_CTL_AUX2, 3794 .debug = ICL_PWR_WELL_CTL_AUX4, 3795 }; 3796 3797 static const struct i915_power_well_regs icl_ddi_power_well_regs = { 3798 .bios = ICL_PWR_WELL_CTL_DDI1, 3799 .driver = ICL_PWR_WELL_CTL_DDI2, 3800 .debug = ICL_PWR_WELL_CTL_DDI4, 3801 }; 3802 3803 static const struct i915_power_well_desc icl_power_wells[] = { 3804 { 3805 .name = "always-on", 3806 .always_on = true, 3807 .domains = POWER_DOMAIN_MASK, 3808 .ops = &i9xx_always_on_power_well_ops, 3809 .id = DISP_PW_ID_NONE, 3810 }, 3811 { 3812 .name = "power well 1", 3813 /* Handled by the DMC firmware */ 3814 .always_on = true, 3815 .domains = 0, 3816 .ops = &hsw_power_well_ops, 3817 .id = SKL_DISP_PW_1, 3818 { 3819 .hsw.regs = &hsw_power_well_regs, 3820 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3821 .hsw.has_fuses = true, 3822 }, 3823 }, 3824 { 3825 .name = "DC off", 3826 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, 3827 .ops = &gen9_dc_off_power_well_ops, 3828 .id = SKL_DISP_DC_OFF, 3829 }, 3830 { 3831 .name = "power well 2", 3832 .domains = ICL_PW_2_POWER_DOMAINS, 3833 .ops = &hsw_power_well_ops, 3834 .id = SKL_DISP_PW_2, 3835 { 3836 .hsw.regs = &hsw_power_well_regs, 3837 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3838 .hsw.has_fuses = true, 3839 }, 3840 }, 3841 { 3842 .name = "power well 3", 3843 .domains = ICL_PW_3_POWER_DOMAINS, 3844 .ops = &hsw_power_well_ops, 3845 .id = ICL_DISP_PW_3, 3846 { 3847 .hsw.regs = &hsw_power_well_regs, 3848 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3849 .hsw.irq_pipe_mask = BIT(PIPE_B), 3850 .hsw.has_vga = true, 3851 .hsw.has_fuses = true, 3852 }, 3853 }, 
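	/*
	 * Note: the entries are kept in enable order; lower-indexed wells
	 * are enabled first and disabled last (see the ordering comment in
	 * intel_power_domains_init()).
	 */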
3854 { 3855 .name = "DDI A IO", 3856 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3857 .ops = &hsw_power_well_ops, 3858 .id = DISP_PW_ID_NONE, 3859 { 3860 .hsw.regs = &icl_ddi_power_well_regs, 3861 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3862 }, 3863 }, 3864 { 3865 .name = "DDI B IO", 3866 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3867 .ops = &hsw_power_well_ops, 3868 .id = DISP_PW_ID_NONE, 3869 { 3870 .hsw.regs = &icl_ddi_power_well_regs, 3871 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3872 }, 3873 }, 3874 { 3875 .name = "DDI C IO", 3876 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3877 .ops = &hsw_power_well_ops, 3878 .id = DISP_PW_ID_NONE, 3879 { 3880 .hsw.regs = &icl_ddi_power_well_regs, 3881 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3882 }, 3883 }, 3884 { 3885 .name = "DDI D IO", 3886 .domains = ICL_DDI_IO_D_POWER_DOMAINS, 3887 .ops = &hsw_power_well_ops, 3888 .id = DISP_PW_ID_NONE, 3889 { 3890 .hsw.regs = &icl_ddi_power_well_regs, 3891 .hsw.idx = ICL_PW_CTL_IDX_DDI_D, 3892 }, 3893 }, 3894 { 3895 .name = "DDI E IO", 3896 .domains = ICL_DDI_IO_E_POWER_DOMAINS, 3897 .ops = &hsw_power_well_ops, 3898 .id = DISP_PW_ID_NONE, 3899 { 3900 .hsw.regs = &icl_ddi_power_well_regs, 3901 .hsw.idx = ICL_PW_CTL_IDX_DDI_E, 3902 }, 3903 }, 3904 { 3905 .name = "DDI F IO", 3906 .domains = ICL_DDI_IO_F_POWER_DOMAINS, 3907 .ops = &hsw_power_well_ops, 3908 .id = DISP_PW_ID_NONE, 3909 { 3910 .hsw.regs = &icl_ddi_power_well_regs, 3911 .hsw.idx = ICL_PW_CTL_IDX_DDI_F, 3912 }, 3913 }, 3914 { 3915 .name = "AUX A", 3916 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 3917 .ops = &icl_aux_power_well_ops, 3918 .id = DISP_PW_ID_NONE, 3919 { 3920 .hsw.regs = &icl_aux_power_well_regs, 3921 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3922 }, 3923 }, 3924 { 3925 .name = "AUX B", 3926 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 3927 .ops = &icl_aux_power_well_ops, 3928 .id = DISP_PW_ID_NONE, 3929 { 3930 .hsw.regs = &icl_aux_power_well_regs, 3931 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3932 }, 3933 }, 3934 { 3935 .name = "AUX C TC1", 3936 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, 3937 .ops = &icl_aux_power_well_ops, 3938 .id = DISP_PW_ID_NONE, 3939 { 3940 .hsw.regs = &icl_aux_power_well_regs, 3941 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3942 .hsw.is_tc_tbt = false, 3943 }, 3944 }, 3945 { 3946 .name = "AUX D TC2", 3947 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, 3948 .ops = &icl_aux_power_well_ops, 3949 .id = DISP_PW_ID_NONE, 3950 { 3951 .hsw.regs = &icl_aux_power_well_regs, 3952 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 3953 .hsw.is_tc_tbt = false, 3954 }, 3955 }, 3956 { 3957 .name = "AUX E TC3", 3958 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, 3959 .ops = &icl_aux_power_well_ops, 3960 .id = DISP_PW_ID_NONE, 3961 { 3962 .hsw.regs = &icl_aux_power_well_regs, 3963 .hsw.idx = ICL_PW_CTL_IDX_AUX_E, 3964 .hsw.is_tc_tbt = false, 3965 }, 3966 }, 3967 { 3968 .name = "AUX F TC4", 3969 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, 3970 .ops = &icl_aux_power_well_ops, 3971 .id = DISP_PW_ID_NONE, 3972 { 3973 .hsw.regs = &icl_aux_power_well_regs, 3974 .hsw.idx = ICL_PW_CTL_IDX_AUX_F, 3975 .hsw.is_tc_tbt = false, 3976 }, 3977 }, 3978 { 3979 .name = "AUX C TBT1", 3980 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, 3981 .ops = &icl_aux_power_well_ops, 3982 .id = DISP_PW_ID_NONE, 3983 { 3984 .hsw.regs = &icl_aux_power_well_regs, 3985 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, 3986 .hsw.is_tc_tbt = true, 3987 }, 3988 }, 3989 { 3990 .name = "AUX D TBT2", 3991 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, 3992 .ops = &icl_aux_power_well_ops, 3993 .id = DISP_PW_ID_NONE, 3994 { 3995 .hsw.regs = &icl_aux_power_well_regs, 3996 .hsw.idx = 
ICL_PW_CTL_IDX_AUX_TBT2, 3997 .hsw.is_tc_tbt = true, 3998 }, 3999 }, 4000 { 4001 .name = "AUX E TBT3", 4002 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, 4003 .ops = &icl_aux_power_well_ops, 4004 .id = DISP_PW_ID_NONE, 4005 { 4006 .hsw.regs = &icl_aux_power_well_regs, 4007 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, 4008 .hsw.is_tc_tbt = true, 4009 }, 4010 }, 4011 { 4012 .name = "AUX F TBT4", 4013 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, 4014 .ops = &icl_aux_power_well_ops, 4015 .id = DISP_PW_ID_NONE, 4016 { 4017 .hsw.regs = &icl_aux_power_well_regs, 4018 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, 4019 .hsw.is_tc_tbt = true, 4020 }, 4021 }, 4022 { 4023 .name = "power well 4", 4024 .domains = ICL_PW_4_POWER_DOMAINS, 4025 .ops = &hsw_power_well_ops, 4026 .id = DISP_PW_ID_NONE, 4027 { 4028 .hsw.regs = &hsw_power_well_regs, 4029 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4030 .hsw.has_fuses = true, 4031 .hsw.irq_pipe_mask = BIT(PIPE_C), 4032 }, 4033 }, 4034 }; 4035 4036 static void 4037 tgl_tc_cold_request(struct drm_i915_private *i915, bool block) 4038 { 4039 u8 tries = 0; 4040 int ret; 4041 4042 while (1) { 4043 u32 low_val; 4044 u32 high_val = 0; 4045 4046 if (block) 4047 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; 4048 else 4049 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; 4050 4051 /* 4052 * Spec states that we should timeout the request after 200us 4053 * but the function below will timeout after 500us 4054 */ 4055 ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, 4056 &high_val); 4057 if (ret == 0) { 4058 if (block && 4059 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) 4060 ret = -EIO; 4061 else 4062 break; 4063 } 4064 4065 if (++tries == 3) 4066 break; 4067 4068 msleep(1); 4069 } 4070 4071 if (ret) 4072 drm_err(&i915->drm, "TC cold %sblock failed\n", 4073 block ? "" : "un"); 4074 else 4075 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", 4076 block ? 
"" : "un"); 4077 } 4078 4079 static void 4080 tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, 4081 struct i915_power_well *power_well) 4082 { 4083 tgl_tc_cold_request(i915, true); 4084 } 4085 4086 static void 4087 tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, 4088 struct i915_power_well *power_well) 4089 { 4090 tgl_tc_cold_request(i915, false); 4091 } 4092 4093 static void 4094 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, 4095 struct i915_power_well *power_well) 4096 { 4097 if (power_well->count > 0) 4098 tgl_tc_cold_off_power_well_enable(i915, power_well); 4099 else 4100 tgl_tc_cold_off_power_well_disable(i915, power_well); 4101 } 4102 4103 static bool 4104 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, 4105 struct i915_power_well *power_well) 4106 { 4107 /* 4108 * Not the correctly implementation but there is no way to just read it 4109 * from PCODE, so returning count to avoid state mismatch errors 4110 */ 4111 return power_well->count; 4112 } 4113 4114 static const struct i915_power_well_ops tgl_tc_cold_off_ops = { 4115 .sync_hw = tgl_tc_cold_off_power_well_sync_hw, 4116 .enable = tgl_tc_cold_off_power_well_enable, 4117 .disable = tgl_tc_cold_off_power_well_disable, 4118 .is_enabled = tgl_tc_cold_off_power_well_is_enabled, 4119 }; 4120 4121 static const struct i915_power_well_desc tgl_power_wells[] = { 4122 { 4123 .name = "always-on", 4124 .always_on = true, 4125 .domains = POWER_DOMAIN_MASK, 4126 .ops = &i9xx_always_on_power_well_ops, 4127 .id = DISP_PW_ID_NONE, 4128 }, 4129 { 4130 .name = "power well 1", 4131 /* Handled by the DMC firmware */ 4132 .always_on = true, 4133 .domains = 0, 4134 .ops = &hsw_power_well_ops, 4135 .id = SKL_DISP_PW_1, 4136 { 4137 .hsw.regs = &hsw_power_well_regs, 4138 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4139 .hsw.has_fuses = true, 4140 }, 4141 }, 4142 { 4143 .name = "DC off", 4144 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, 4145 .ops = &gen9_dc_off_power_well_ops, 4146 .id = SKL_DISP_DC_OFF, 4147 }, 4148 { 4149 .name = "power well 2", 4150 .domains = TGL_PW_2_POWER_DOMAINS, 4151 .ops = &hsw_power_well_ops, 4152 .id = SKL_DISP_PW_2, 4153 { 4154 .hsw.regs = &hsw_power_well_regs, 4155 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 4156 .hsw.has_fuses = true, 4157 }, 4158 }, 4159 { 4160 .name = "power well 3", 4161 .domains = TGL_PW_3_POWER_DOMAINS, 4162 .ops = &hsw_power_well_ops, 4163 .id = ICL_DISP_PW_3, 4164 { 4165 .hsw.regs = &hsw_power_well_regs, 4166 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4167 .hsw.irq_pipe_mask = BIT(PIPE_B), 4168 .hsw.has_vga = true, 4169 .hsw.has_fuses = true, 4170 }, 4171 }, 4172 { 4173 .name = "DDI A IO", 4174 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4175 .ops = &hsw_power_well_ops, 4176 .id = DISP_PW_ID_NONE, 4177 { 4178 .hsw.regs = &icl_ddi_power_well_regs, 4179 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4180 } 4181 }, 4182 { 4183 .name = "DDI B IO", 4184 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4185 .ops = &hsw_power_well_ops, 4186 .id = DISP_PW_ID_NONE, 4187 { 4188 .hsw.regs = &icl_ddi_power_well_regs, 4189 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4190 } 4191 }, 4192 { 4193 .name = "DDI C IO", 4194 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 4195 .ops = &hsw_power_well_ops, 4196 .id = DISP_PW_ID_NONE, 4197 { 4198 .hsw.regs = &icl_ddi_power_well_regs, 4199 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 4200 } 4201 }, 4202 { 4203 .name = "DDI IO TC1", 4204 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4205 .ops = &hsw_power_well_ops, 4206 .id = DISP_PW_ID_NONE, 4207 { 4208 .hsw.regs = &icl_ddi_power_well_regs, 4209 
.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4210 }, 4211 }, 4212 { 4213 .name = "DDI IO TC2", 4214 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4215 .ops = &hsw_power_well_ops, 4216 .id = DISP_PW_ID_NONE, 4217 { 4218 .hsw.regs = &icl_ddi_power_well_regs, 4219 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4220 }, 4221 }, 4222 { 4223 .name = "DDI IO TC3", 4224 .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, 4225 .ops = &hsw_power_well_ops, 4226 .id = DISP_PW_ID_NONE, 4227 { 4228 .hsw.regs = &icl_ddi_power_well_regs, 4229 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 4230 }, 4231 }, 4232 { 4233 .name = "DDI IO TC4", 4234 .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, 4235 .ops = &hsw_power_well_ops, 4236 .id = DISP_PW_ID_NONE, 4237 { 4238 .hsw.regs = &icl_ddi_power_well_regs, 4239 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 4240 }, 4241 }, 4242 { 4243 .name = "DDI IO TC5", 4244 .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, 4245 .ops = &hsw_power_well_ops, 4246 .id = DISP_PW_ID_NONE, 4247 { 4248 .hsw.regs = &icl_ddi_power_well_regs, 4249 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, 4250 }, 4251 }, 4252 { 4253 .name = "DDI IO TC6", 4254 .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, 4255 .ops = &hsw_power_well_ops, 4256 .id = DISP_PW_ID_NONE, 4257 { 4258 .hsw.regs = &icl_ddi_power_well_regs, 4259 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, 4260 }, 4261 }, 4262 { 4263 .name = "TC cold off", 4264 .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, 4265 .ops = &tgl_tc_cold_off_ops, 4266 .id = TGL_DISP_PW_TC_COLD_OFF, 4267 }, 4268 { 4269 .name = "AUX A", 4270 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 4271 .ops = &icl_aux_power_well_ops, 4272 .id = DISP_PW_ID_NONE, 4273 { 4274 .hsw.regs = &icl_aux_power_well_regs, 4275 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4276 }, 4277 }, 4278 { 4279 .name = "AUX B", 4280 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 4281 .ops = &icl_aux_power_well_ops, 4282 .id = DISP_PW_ID_NONE, 4283 { 4284 .hsw.regs = &icl_aux_power_well_regs, 4285 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4286 }, 4287 }, 4288 { 4289 .name = "AUX C", 4290 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 4291 .ops = &icl_aux_power_well_ops, 4292 .id = DISP_PW_ID_NONE, 4293 { 4294 .hsw.regs = &icl_aux_power_well_regs, 4295 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 4296 }, 4297 }, 4298 { 4299 .name = "AUX USBC1", 4300 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4301 .ops = &icl_aux_power_well_ops, 4302 .id = DISP_PW_ID_NONE, 4303 { 4304 .hsw.regs = &icl_aux_power_well_regs, 4305 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4306 .hsw.is_tc_tbt = false, 4307 }, 4308 }, 4309 { 4310 .name = "AUX USBC2", 4311 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4312 .ops = &icl_aux_power_well_ops, 4313 .id = DISP_PW_ID_NONE, 4314 { 4315 .hsw.regs = &icl_aux_power_well_regs, 4316 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4317 .hsw.is_tc_tbt = false, 4318 }, 4319 }, 4320 { 4321 .name = "AUX USBC3", 4322 .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS, 4323 .ops = &icl_aux_power_well_ops, 4324 .id = DISP_PW_ID_NONE, 4325 { 4326 .hsw.regs = &icl_aux_power_well_regs, 4327 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 4328 .hsw.is_tc_tbt = false, 4329 }, 4330 }, 4331 { 4332 .name = "AUX USBC4", 4333 .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS, 4334 .ops = &icl_aux_power_well_ops, 4335 .id = DISP_PW_ID_NONE, 4336 { 4337 .hsw.regs = &icl_aux_power_well_regs, 4338 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 4339 .hsw.is_tc_tbt = false, 4340 }, 4341 }, 4342 { 4343 .name = "AUX USBC5", 4344 .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS, 4345 .ops = &icl_aux_power_well_ops, 4346 .id = DISP_PW_ID_NONE, 4347 { 4348 .hsw.regs = &icl_aux_power_well_regs, 4349 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, 4350 .hsw.is_tc_tbt = 
false, 4351 }, 4352 }, 4353 { 4354 .name = "AUX USBC6", 4355 .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS, 4356 .ops = &icl_aux_power_well_ops, 4357 .id = DISP_PW_ID_NONE, 4358 { 4359 .hsw.regs = &icl_aux_power_well_regs, 4360 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, 4361 .hsw.is_tc_tbt = false, 4362 }, 4363 }, 4364 { 4365 .name = "AUX TBT1", 4366 .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS, 4367 .ops = &icl_aux_power_well_ops, 4368 .id = DISP_PW_ID_NONE, 4369 { 4370 .hsw.regs = &icl_aux_power_well_regs, 4371 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 4372 .hsw.is_tc_tbt = true, 4373 }, 4374 }, 4375 { 4376 .name = "AUX TBT2", 4377 .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS, 4378 .ops = &icl_aux_power_well_ops, 4379 .id = DISP_PW_ID_NONE, 4380 { 4381 .hsw.regs = &icl_aux_power_well_regs, 4382 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 4383 .hsw.is_tc_tbt = true, 4384 }, 4385 }, 4386 { 4387 .name = "AUX TBT3", 4388 .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS, 4389 .ops = &icl_aux_power_well_ops, 4390 .id = DISP_PW_ID_NONE, 4391 { 4392 .hsw.regs = &icl_aux_power_well_regs, 4393 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 4394 .hsw.is_tc_tbt = true, 4395 }, 4396 }, 4397 { 4398 .name = "AUX TBT4", 4399 .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS, 4400 .ops = &icl_aux_power_well_ops, 4401 .id = DISP_PW_ID_NONE, 4402 { 4403 .hsw.regs = &icl_aux_power_well_regs, 4404 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 4405 .hsw.is_tc_tbt = true, 4406 }, 4407 }, 4408 { 4409 .name = "AUX TBT5", 4410 .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS, 4411 .ops = &icl_aux_power_well_ops, 4412 .id = DISP_PW_ID_NONE, 4413 { 4414 .hsw.regs = &icl_aux_power_well_regs, 4415 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, 4416 .hsw.is_tc_tbt = true, 4417 }, 4418 }, 4419 { 4420 .name = "AUX TBT6", 4421 .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS, 4422 .ops = &icl_aux_power_well_ops, 4423 .id = DISP_PW_ID_NONE, 4424 { 4425 .hsw.regs = &icl_aux_power_well_regs, 4426 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, 4427 .hsw.is_tc_tbt = true, 4428 }, 4429 }, 4430 { 4431 .name = "power well 4", 4432 .domains = TGL_PW_4_POWER_DOMAINS, 4433 .ops = &hsw_power_well_ops, 4434 .id = DISP_PW_ID_NONE, 4435 { 4436 .hsw.regs = &hsw_power_well_regs, 4437 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4438 .hsw.has_fuses = true, 4439 .hsw.irq_pipe_mask = BIT(PIPE_C), 4440 } 4441 }, 4442 { 4443 .name = "power well 5", 4444 .domains = TGL_PW_5_POWER_DOMAINS, 4445 .ops = &hsw_power_well_ops, 4446 .id = DISP_PW_ID_NONE, 4447 { 4448 .hsw.regs = &hsw_power_well_regs, 4449 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4450 .hsw.has_fuses = true, 4451 .hsw.irq_pipe_mask = BIT(PIPE_D), 4452 }, 4453 }, 4454 }; 4455 4456 static const struct i915_power_well_desc rkl_power_wells[] = { 4457 { 4458 .name = "always-on", 4459 .always_on = true, 4460 .domains = POWER_DOMAIN_MASK, 4461 .ops = &i9xx_always_on_power_well_ops, 4462 .id = DISP_PW_ID_NONE, 4463 }, 4464 { 4465 .name = "power well 1", 4466 /* Handled by the DMC firmware */ 4467 .always_on = true, 4468 .domains = 0, 4469 .ops = &hsw_power_well_ops, 4470 .id = SKL_DISP_PW_1, 4471 { 4472 .hsw.regs = &hsw_power_well_regs, 4473 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4474 .hsw.has_fuses = true, 4475 }, 4476 }, 4477 { 4478 .name = "DC off", 4479 .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS, 4480 .ops = &gen9_dc_off_power_well_ops, 4481 .id = SKL_DISP_DC_OFF, 4482 }, 4483 { 4484 .name = "power well 3", 4485 .domains = RKL_PW_3_POWER_DOMAINS, 4486 .ops = &hsw_power_well_ops, 4487 .id = ICL_DISP_PW_3, 4488 { 4489 .hsw.regs = &hsw_power_well_regs, 4490 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4491 .hsw.irq_pipe_mask = BIT(PIPE_B), 
4492 .hsw.has_vga = true, 4493 .hsw.has_fuses = true, 4494 }, 4495 }, 4496 { 4497 .name = "power well 4", 4498 .domains = RKL_PW_4_POWER_DOMAINS, 4499 .ops = &hsw_power_well_ops, 4500 .id = DISP_PW_ID_NONE, 4501 { 4502 .hsw.regs = &hsw_power_well_regs, 4503 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4504 .hsw.has_fuses = true, 4505 .hsw.irq_pipe_mask = BIT(PIPE_C), 4506 } 4507 }, 4508 { 4509 .name = "DDI A IO", 4510 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4511 .ops = &hsw_power_well_ops, 4512 .id = DISP_PW_ID_NONE, 4513 { 4514 .hsw.regs = &icl_ddi_power_well_regs, 4515 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4516 } 4517 }, 4518 { 4519 .name = "DDI B IO", 4520 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4521 .ops = &hsw_power_well_ops, 4522 .id = DISP_PW_ID_NONE, 4523 { 4524 .hsw.regs = &icl_ddi_power_well_regs, 4525 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4526 } 4527 }, 4528 { 4529 .name = "DDI IO TC1", 4530 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4531 .ops = &hsw_power_well_ops, 4532 .id = DISP_PW_ID_NONE, 4533 { 4534 .hsw.regs = &icl_ddi_power_well_regs, 4535 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4536 }, 4537 }, 4538 { 4539 .name = "DDI IO TC2", 4540 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4541 .ops = &hsw_power_well_ops, 4542 .id = DISP_PW_ID_NONE, 4543 { 4544 .hsw.regs = &icl_ddi_power_well_regs, 4545 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4546 }, 4547 }, 4548 { 4549 .name = "AUX A", 4550 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 4551 .ops = &icl_aux_power_well_ops, 4552 .id = DISP_PW_ID_NONE, 4553 { 4554 .hsw.regs = &icl_aux_power_well_regs, 4555 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4556 }, 4557 }, 4558 { 4559 .name = "AUX B", 4560 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 4561 .ops = &icl_aux_power_well_ops, 4562 .id = DISP_PW_ID_NONE, 4563 { 4564 .hsw.regs = &icl_aux_power_well_regs, 4565 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4566 }, 4567 }, 4568 { 4569 .name = "AUX USBC1", 4570 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4571 .ops = &icl_aux_power_well_ops, 4572 .id = DISP_PW_ID_NONE, 4573 { 4574 .hsw.regs = &icl_aux_power_well_regs, 4575 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4576 }, 4577 }, 4578 { 4579 .name = "AUX USBC2", 4580 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4581 .ops = &icl_aux_power_well_ops, 4582 .id = DISP_PW_ID_NONE, 4583 { 4584 .hsw.regs = &icl_aux_power_well_regs, 4585 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4586 }, 4587 }, 4588 }; 4589 4590 static const struct i915_power_well_desc dg1_power_wells[] = { 4591 { 4592 .name = "always-on", 4593 .always_on = true, 4594 .domains = POWER_DOMAIN_MASK, 4595 .ops = &i9xx_always_on_power_well_ops, 4596 .id = DISP_PW_ID_NONE, 4597 }, 4598 { 4599 .name = "power well 1", 4600 /* Handled by the DMC firmware */ 4601 .always_on = true, 4602 .domains = 0, 4603 .ops = &hsw_power_well_ops, 4604 .id = SKL_DISP_PW_1, 4605 { 4606 .hsw.regs = &hsw_power_well_regs, 4607 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4608 .hsw.has_fuses = true, 4609 }, 4610 }, 4611 { 4612 .name = "DC off", 4613 .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS, 4614 .ops = &gen9_dc_off_power_well_ops, 4615 .id = SKL_DISP_DC_OFF, 4616 }, 4617 { 4618 .name = "power well 2", 4619 .domains = DG1_PW_2_POWER_DOMAINS, 4620 .ops = &hsw_power_well_ops, 4621 .id = SKL_DISP_PW_2, 4622 { 4623 .hsw.regs = &hsw_power_well_regs, 4624 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 4625 .hsw.has_fuses = true, 4626 }, 4627 }, 4628 { 4629 .name = "power well 3", 4630 .domains = DG1_PW_3_POWER_DOMAINS, 4631 .ops = &hsw_power_well_ops, 4632 .id = ICL_DISP_PW_3, 4633 { 4634 .hsw.regs = &hsw_power_well_regs, 4635 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4636 .hsw.irq_pipe_mask = 
BIT(PIPE_B), 4637 .hsw.has_vga = true, 4638 .hsw.has_fuses = true, 4639 }, 4640 }, 4641 { 4642 .name = "DDI A IO", 4643 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4644 .ops = &hsw_power_well_ops, 4645 .id = DISP_PW_ID_NONE, 4646 { 4647 .hsw.regs = &icl_ddi_power_well_regs, 4648 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4649 } 4650 }, 4651 { 4652 .name = "DDI B IO", 4653 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4654 .ops = &hsw_power_well_ops, 4655 .id = DISP_PW_ID_NONE, 4656 { 4657 .hsw.regs = &icl_ddi_power_well_regs, 4658 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4659 } 4660 }, 4661 { 4662 .name = "DDI IO TC1", 4663 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4664 .ops = &hsw_power_well_ops, 4665 .id = DISP_PW_ID_NONE, 4666 { 4667 .hsw.regs = &icl_ddi_power_well_regs, 4668 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4669 }, 4670 }, 4671 { 4672 .name = "DDI IO TC2", 4673 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4674 .ops = &hsw_power_well_ops, 4675 .id = DISP_PW_ID_NONE, 4676 { 4677 .hsw.regs = &icl_ddi_power_well_regs, 4678 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4679 }, 4680 }, 4681 { 4682 .name = "AUX A", 4683 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 4684 .ops = &icl_aux_power_well_ops, 4685 .id = DISP_PW_ID_NONE, 4686 { 4687 .hsw.regs = &icl_aux_power_well_regs, 4688 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4689 }, 4690 }, 4691 { 4692 .name = "AUX B", 4693 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 4694 .ops = &icl_aux_power_well_ops, 4695 .id = DISP_PW_ID_NONE, 4696 { 4697 .hsw.regs = &icl_aux_power_well_regs, 4698 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4699 }, 4700 }, 4701 { 4702 .name = "AUX USBC1", 4703 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4704 .ops = &icl_aux_power_well_ops, 4705 .id = DISP_PW_ID_NONE, 4706 { 4707 .hsw.regs = &icl_aux_power_well_regs, 4708 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4709 .hsw.is_tc_tbt = false, 4710 }, 4711 }, 4712 { 4713 .name = "AUX USBC2", 4714 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4715 .ops = &icl_aux_power_well_ops, 4716 .id = DISP_PW_ID_NONE, 4717 { 4718 .hsw.regs = &icl_aux_power_well_regs, 4719 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4720 .hsw.is_tc_tbt = false, 4721 }, 4722 }, 4723 { 4724 .name = "power well 4", 4725 .domains = TGL_PW_4_POWER_DOMAINS, 4726 .ops = &hsw_power_well_ops, 4727 .id = DISP_PW_ID_NONE, 4728 { 4729 .hsw.regs = &hsw_power_well_regs, 4730 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4731 .hsw.has_fuses = true, 4732 .hsw.irq_pipe_mask = BIT(PIPE_C), 4733 } 4734 }, 4735 { 4736 .name = "power well 5", 4737 .domains = TGL_PW_5_POWER_DOMAINS, 4738 .ops = &hsw_power_well_ops, 4739 .id = DISP_PW_ID_NONE, 4740 { 4741 .hsw.regs = &hsw_power_well_regs, 4742 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4743 .hsw.has_fuses = true, 4744 .hsw.irq_pipe_mask = BIT(PIPE_D), 4745 }, 4746 }, 4747 }; 4748 4749 static const struct i915_power_well_desc xelpd_power_wells[] = { 4750 { 4751 .name = "always-on", 4752 .always_on = true, 4753 .domains = POWER_DOMAIN_MASK, 4754 .ops = &i9xx_always_on_power_well_ops, 4755 .id = DISP_PW_ID_NONE, 4756 }, 4757 { 4758 .name = "power well 1", 4759 /* Handled by the DMC firmware */ 4760 .always_on = true, 4761 .domains = 0, 4762 .ops = &hsw_power_well_ops, 4763 .id = SKL_DISP_PW_1, 4764 { 4765 .hsw.regs = &hsw_power_well_regs, 4766 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4767 .hsw.has_fuses = true, 4768 }, 4769 }, 4770 { 4771 .name = "DC off", 4772 .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS, 4773 .ops = &gen9_dc_off_power_well_ops, 4774 .id = SKL_DISP_DC_OFF, 4775 }, 4776 { 4777 .name = "power well 2", 4778 .domains = XELPD_PW_2_POWER_DOMAINS, 4779 .ops = &hsw_power_well_ops, 4780 .id = 
SKL_DISP_PW_2, 4781 { 4782 .hsw.regs = &hsw_power_well_regs, 4783 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 4784 .hsw.has_vga = true, 4785 .hsw.has_fuses = true, 4786 }, 4787 }, 4788 { 4789 .name = "power well A", 4790 .domains = XELPD_PW_A_POWER_DOMAINS, 4791 .ops = &hsw_power_well_ops, 4792 .id = DISP_PW_ID_NONE, 4793 { 4794 .hsw.regs = &hsw_power_well_regs, 4795 .hsw.idx = XELPD_PW_CTL_IDX_PW_A, 4796 .hsw.irq_pipe_mask = BIT(PIPE_A), 4797 .hsw.has_fuses = true, 4798 }, 4799 }, 4800 { 4801 .name = "power well B", 4802 .domains = XELPD_PW_B_POWER_DOMAINS, 4803 .ops = &hsw_power_well_ops, 4804 .id = DISP_PW_ID_NONE, 4805 { 4806 .hsw.regs = &hsw_power_well_regs, 4807 .hsw.idx = XELPD_PW_CTL_IDX_PW_B, 4808 .hsw.irq_pipe_mask = BIT(PIPE_B), 4809 .hsw.has_fuses = true, 4810 }, 4811 }, 4812 { 4813 .name = "power well C", 4814 .domains = XELPD_PW_C_POWER_DOMAINS, 4815 .ops = &hsw_power_well_ops, 4816 .id = DISP_PW_ID_NONE, 4817 { 4818 .hsw.regs = &hsw_power_well_regs, 4819 .hsw.idx = XELPD_PW_CTL_IDX_PW_C, 4820 .hsw.irq_pipe_mask = BIT(PIPE_C), 4821 .hsw.has_fuses = true, 4822 }, 4823 }, 4824 { 4825 .name = "power well D", 4826 .domains = XELPD_PW_D_POWER_DOMAINS, 4827 .ops = &hsw_power_well_ops, 4828 .id = DISP_PW_ID_NONE, 4829 { 4830 .hsw.regs = &hsw_power_well_regs, 4831 .hsw.idx = XELPD_PW_CTL_IDX_PW_D, 4832 .hsw.irq_pipe_mask = BIT(PIPE_D), 4833 .hsw.has_fuses = true, 4834 }, 4835 }, 4836 { 4837 .name = "DDI A IO", 4838 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4839 .ops = &hsw_power_well_ops, 4840 .id = DISP_PW_ID_NONE, 4841 { 4842 .hsw.regs = &icl_ddi_power_well_regs, 4843 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4844 } 4845 }, 4846 { 4847 .name = "DDI B IO", 4848 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4849 .ops = &hsw_power_well_ops, 4850 .id = DISP_PW_ID_NONE, 4851 { 4852 .hsw.regs = &icl_ddi_power_well_regs, 4853 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4854 } 4855 }, 4856 { 4857 .name = "DDI C IO", 4858 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 4859 .ops = &hsw_power_well_ops, 4860 .id = DISP_PW_ID_NONE, 4861 { 4862 .hsw.regs = &icl_ddi_power_well_regs, 4863 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 4864 } 4865 }, 4866 { 4867 .name = "DDI IO D_XELPD", 4868 .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS, 4869 .ops = &hsw_power_well_ops, 4870 .id = DISP_PW_ID_NONE, 4871 { 4872 .hsw.regs = &icl_ddi_power_well_regs, 4873 .hsw.idx = XELPD_PW_CTL_IDX_DDI_D, 4874 } 4875 }, 4876 { 4877 .name = "DDI IO E_XELPD", 4878 .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS, 4879 .ops = &hsw_power_well_ops, 4880 .id = DISP_PW_ID_NONE, 4881 { 4882 .hsw.regs = &icl_ddi_power_well_regs, 4883 .hsw.idx = XELPD_PW_CTL_IDX_DDI_E, 4884 } 4885 }, 4886 { 4887 .name = "DDI IO TC1", 4888 .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS, 4889 .ops = &hsw_power_well_ops, 4890 .id = DISP_PW_ID_NONE, 4891 { 4892 .hsw.regs = &icl_ddi_power_well_regs, 4893 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4894 } 4895 }, 4896 { 4897 .name = "DDI IO TC2", 4898 .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS, 4899 .ops = &hsw_power_well_ops, 4900 .id = DISP_PW_ID_NONE, 4901 { 4902 .hsw.regs = &icl_ddi_power_well_regs, 4903 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4904 } 4905 }, 4906 { 4907 .name = "DDI IO TC3", 4908 .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS, 4909 .ops = &hsw_power_well_ops, 4910 .id = DISP_PW_ID_NONE, 4911 { 4912 .hsw.regs = &icl_ddi_power_well_regs, 4913 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 4914 } 4915 }, 4916 { 4917 .name = "DDI IO TC4", 4918 .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS, 4919 .ops = &hsw_power_well_ops, 4920 .id = DISP_PW_ID_NONE, 4921 { 4922 .hsw.regs = 
&icl_ddi_power_well_regs, 4923 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 4924 } 4925 }, 4926 { 4927 .name = "AUX A", 4928 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 4929 .ops = &icl_aux_power_well_ops, 4930 .id = DISP_PW_ID_NONE, 4931 { 4932 .hsw.regs = &icl_aux_power_well_regs, 4933 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4934 .hsw.fixed_enable_delay = 600, 4935 }, 4936 }, 4937 { 4938 .name = "AUX B", 4939 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 4940 .ops = &icl_aux_power_well_ops, 4941 .id = DISP_PW_ID_NONE, 4942 { 4943 .hsw.regs = &icl_aux_power_well_regs, 4944 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4945 .hsw.fixed_enable_delay = 600, 4946 }, 4947 }, 4948 { 4949 .name = "AUX C", 4950 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 4951 .ops = &icl_aux_power_well_ops, 4952 .id = DISP_PW_ID_NONE, 4953 { 4954 .hsw.regs = &icl_aux_power_well_regs, 4955 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 4956 .hsw.fixed_enable_delay = 600, 4957 }, 4958 }, 4959 { 4960 .name = "AUX D_XELPD", 4961 .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS, 4962 .ops = &icl_aux_power_well_ops, 4963 .id = DISP_PW_ID_NONE, 4964 { 4965 .hsw.regs = &icl_aux_power_well_regs, 4966 .hsw.idx = XELPD_PW_CTL_IDX_AUX_D, 4967 .hsw.fixed_enable_delay = 600, 4968 }, 4969 }, 4970 { 4971 .name = "AUX E_XELPD", 4972 .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS, 4973 .ops = &icl_aux_power_well_ops, 4974 .id = DISP_PW_ID_NONE, 4975 { 4976 .hsw.regs = &icl_aux_power_well_regs, 4977 .hsw.idx = XELPD_PW_CTL_IDX_AUX_E, 4978 }, 4979 }, 4980 { 4981 .name = "AUX USBC1", 4982 .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS, 4983 .ops = &icl_aux_power_well_ops, 4984 .id = DISP_PW_ID_NONE, 4985 { 4986 .hsw.regs = &icl_aux_power_well_regs, 4987 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4988 .hsw.fixed_enable_delay = 600, 4989 }, 4990 }, 4991 { 4992 .name = "AUX USBC2", 4993 .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS, 4994 .ops = &icl_aux_power_well_ops, 4995 .id = DISP_PW_ID_NONE, 4996 { 4997 .hsw.regs = &icl_aux_power_well_regs, 4998 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4999 }, 5000 }, 5001 { 5002 .name = "AUX USBC3", 5003 .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS, 5004 .ops = &icl_aux_power_well_ops, 5005 .id = DISP_PW_ID_NONE, 5006 { 5007 .hsw.regs = &icl_aux_power_well_regs, 5008 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 5009 }, 5010 }, 5011 { 5012 .name = "AUX USBC4", 5013 .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS, 5014 .ops = &icl_aux_power_well_ops, 5015 .id = DISP_PW_ID_NONE, 5016 { 5017 .hsw.regs = &icl_aux_power_well_regs, 5018 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 5019 }, 5020 }, 5021 { 5022 .name = "AUX TBT1", 5023 .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS, 5024 .ops = &icl_aux_power_well_ops, 5025 .id = DISP_PW_ID_NONE, 5026 { 5027 .hsw.regs = &icl_aux_power_well_regs, 5028 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 5029 .hsw.is_tc_tbt = true, 5030 }, 5031 }, 5032 { 5033 .name = "AUX TBT2", 5034 .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS, 5035 .ops = &icl_aux_power_well_ops, 5036 .id = DISP_PW_ID_NONE, 5037 { 5038 .hsw.regs = &icl_aux_power_well_regs, 5039 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 5040 .hsw.is_tc_tbt = true, 5041 }, 5042 }, 5043 { 5044 .name = "AUX TBT3", 5045 .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS, 5046 .ops = &icl_aux_power_well_ops, 5047 .id = DISP_PW_ID_NONE, 5048 { 5049 .hsw.regs = &icl_aux_power_well_regs, 5050 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 5051 .hsw.is_tc_tbt = true, 5052 }, 5053 }, 5054 { 5055 .name = "AUX TBT4", 5056 .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS, 5057 .ops = &icl_aux_power_well_ops, 5058 .id = DISP_PW_ID_NONE, 5059 { 5060 .hsw.regs = 
&icl_aux_power_well_regs, 5061 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 5062 .hsw.is_tc_tbt = true, 5063 }, 5064 }, 5065 }; 5066 5067 static int 5068 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 5069 int disable_power_well) 5070 { 5071 if (disable_power_well >= 0) 5072 return !!disable_power_well; 5073 5074 return 1; 5075 } 5076 5077 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, 5078 int enable_dc) 5079 { 5080 u32 mask; 5081 int requested_dc; 5082 int max_dc; 5083 5084 if (!HAS_DISPLAY(dev_priv)) 5085 return 0; 5086 5087 if (IS_DG1(dev_priv)) 5088 max_dc = 3; 5089 else if (DISPLAY_VER(dev_priv) >= 12) 5090 max_dc = 4; 5091 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 5092 max_dc = 1; 5093 else if (DISPLAY_VER(dev_priv) >= 9) 5094 max_dc = 2; 5095 else 5096 max_dc = 0; 5097 5098 /* 5099 * DC9 has a separate HW flow from the rest of the DC states, 5100 * not depending on the DMC firmware. It's needed by system 5101 * suspend/resume, so allow it unconditionally. 5102 */ 5103 mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || 5104 DISPLAY_VER(dev_priv) >= 11 ? 5105 DC_STATE_EN_DC9 : 0; 5106 5107 if (!dev_priv->params.disable_power_well) 5108 max_dc = 0; 5109 5110 if (enable_dc >= 0 && enable_dc <= max_dc) { 5111 requested_dc = enable_dc; 5112 } else if (enable_dc == -1) { 5113 requested_dc = max_dc; 5114 } else if (enable_dc > max_dc && enable_dc <= 4) { 5115 drm_dbg_kms(&dev_priv->drm, 5116 "Adjusting requested max DC state (%d->%d)\n", 5117 enable_dc, max_dc); 5118 requested_dc = max_dc; 5119 } else { 5120 drm_err(&dev_priv->drm, 5121 "Unexpected value for enable_dc (%d)\n", enable_dc); 5122 requested_dc = max_dc; 5123 } 5124 5125 switch (requested_dc) { 5126 case 4: 5127 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; 5128 break; 5129 case 3: 5130 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; 5131 break; 5132 case 2: 5133 mask |= DC_STATE_EN_UPTO_DC6; 5134 break; 5135 case 1: 5136 mask |= DC_STATE_EN_UPTO_DC5; 5137 break; 5138 } 5139 5140 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask); 5141 5142 return mask; 5143 } 5144 5145 static int 5146 __set_power_wells(struct i915_power_domains *power_domains, 5147 const struct i915_power_well_desc *power_well_descs, 5148 int power_well_descs_sz, u64 skip_mask) 5149 { 5150 struct drm_i915_private *i915 = container_of(power_domains, 5151 struct drm_i915_private, 5152 power_domains); 5153 u64 power_well_ids = 0; 5154 int power_well_count = 0; 5155 int i, plt_idx = 0; 5156 5157 for (i = 0; i < power_well_descs_sz; i++) 5158 if (!(BIT_ULL(power_well_descs[i].id) & skip_mask)) 5159 power_well_count++; 5160 5161 power_domains->power_well_count = power_well_count; 5162 power_domains->power_wells = 5163 kcalloc(power_well_count, 5164 sizeof(*power_domains->power_wells), 5165 GFP_KERNEL); 5166 if (!power_domains->power_wells) 5167 return -ENOMEM; 5168 5169 for (i = 0; i < power_well_descs_sz; i++) { 5170 enum i915_power_well_id id = power_well_descs[i].id; 5171 5172 if (BIT_ULL(id) & skip_mask) 5173 continue; 5174 5175 power_domains->power_wells[plt_idx++].desc = 5176 &power_well_descs[i]; 5177 5178 if (id == DISP_PW_ID_NONE) 5179 continue; 5180 5181 drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8); 5182 drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id)); 5183 power_well_ids |= BIT_ULL(id); 5184 } 5185 5186 return 0; 5187 } 5188 5189 #define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \ 5190 __set_power_wells(power_domains, 
__power_well_descs, \ 5191 ARRAY_SIZE(__power_well_descs), skip_mask) 5192 5193 #define set_power_wells(power_domains, __power_well_descs) \ 5194 set_power_wells_mask(power_domains, __power_well_descs, 0) 5195 5196 /** 5197 * intel_power_domains_init - initializes the power domain structures 5198 * @dev_priv: i915 device instance 5199 * 5200 * Initializes the power domain structures for @dev_priv depending upon the 5201 * supported platform. 5202 */ 5203 int intel_power_domains_init(struct drm_i915_private *dev_priv) 5204 { 5205 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5206 int err; 5207 5208 dev_priv->params.disable_power_well = 5209 sanitize_disable_power_well_option(dev_priv, 5210 dev_priv->params.disable_power_well); 5211 dev_priv->dmc.allowed_dc_mask = 5212 get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); 5213 5214 dev_priv->dmc.target_dc_state = 5215 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 5216 5217 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); 5218 5219 mutex_init(&power_domains->lock); 5220 5221 INIT_DELAYED_WORK(&power_domains->async_put_work, 5222 intel_display_power_put_async_work); 5223 5224 /* 5225 * The enabling order will be from lower to higher indexed wells, 5226 * the disabling order is reversed. 5227 */ 5228 if (!HAS_DISPLAY(dev_priv)) { 5229 power_domains->power_well_count = 0; 5230 err = 0; 5231 } else if (DISPLAY_VER(dev_priv) >= 13) { 5232 err = set_power_wells(power_domains, xelpd_power_wells); 5233 } else if (IS_DG1(dev_priv)) { 5234 err = set_power_wells(power_domains, dg1_power_wells); 5235 } else if (IS_ALDERLAKE_S(dev_priv)) { 5236 err = set_power_wells_mask(power_domains, tgl_power_wells, 5237 BIT_ULL(TGL_DISP_PW_TC_COLD_OFF)); 5238 } else if (IS_ROCKETLAKE(dev_priv)) { 5239 err = set_power_wells(power_domains, rkl_power_wells); 5240 } else if (DISPLAY_VER(dev_priv) == 12) { 5241 err = set_power_wells(power_domains, tgl_power_wells); 5242 } else if (DISPLAY_VER(dev_priv) == 11) { 5243 err = set_power_wells(power_domains, icl_power_wells); 5244 } else if (IS_GEMINILAKE(dev_priv)) { 5245 err = set_power_wells(power_domains, glk_power_wells); 5246 } else if (IS_BROXTON(dev_priv)) { 5247 err = set_power_wells(power_domains, bxt_power_wells); 5248 } else if (DISPLAY_VER(dev_priv) == 9) { 5249 err = set_power_wells(power_domains, skl_power_wells); 5250 } else if (IS_CHERRYVIEW(dev_priv)) { 5251 err = set_power_wells(power_domains, chv_power_wells); 5252 } else if (IS_BROADWELL(dev_priv)) { 5253 err = set_power_wells(power_domains, bdw_power_wells); 5254 } else if (IS_HASWELL(dev_priv)) { 5255 err = set_power_wells(power_domains, hsw_power_wells); 5256 } else if (IS_VALLEYVIEW(dev_priv)) { 5257 err = set_power_wells(power_domains, vlv_power_wells); 5258 } else if (IS_I830(dev_priv)) { 5259 err = set_power_wells(power_domains, i830_power_wells); 5260 } else { 5261 err = set_power_wells(power_domains, i9xx_always_on_power_well); 5262 } 5263 5264 return err; 5265 } 5266 5267 /** 5268 * intel_power_domains_cleanup - clean up power domains resources 5269 * @dev_priv: i915 device instance 5270 * 5271 * Release any resources acquired by intel_power_domains_init() 5272 */ 5273 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) 5274 { 5275 kfree(dev_priv->power_domains.power_wells); 5276 } 5277 5278 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) 5279 { 5280 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5281 struct i915_power_well *power_well; 5282 5283 
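	/*
	 * Take the power domains lock so that the hw state programmed by
	 * ->sync_hw() and the hw_enabled value cached below can't be
	 * observed half-updated by a concurrent power well update.
	 */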
	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->desc->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled =
			power_well->desc->ops->is_enabled(dev_priv, power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, enabledisable(enable));
}

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * This might run in parallel with gen9_dc_off_power_well_enable(),
	 * called for instance from intel_dp_detect(). Without the lock,
	 * gen9_assert_dbuf_enabled() could run after the DBUF_CTL registers
	 * have been updated but before dev_priv->dbuf.enabled_slices is,
	 * and trigger a spurious assertion.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
	 * Power up at least one slice; which slices we actually have and
	 * need is figured out later.
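	 * E.g. if intel_enabled_dbuf_slices_mask() reported 0x3 above, the
	 * request below stays 0x3; if it reported 0, the request degenerates
	 * to just BIT(DBUF_S1).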
5348 */ 5349 gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | 5350 dev_priv->dbuf.enabled_slices); 5351 } 5352 5353 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) 5354 { 5355 gen9_dbuf_slices_update(dev_priv, 0); 5356 } 5357 5358 static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) 5359 { 5360 enum dbuf_slice slice; 5361 5362 if (IS_ALDERLAKE_P(dev_priv)) 5363 return; 5364 5365 for_each_dbuf_slice(dev_priv, slice) 5366 intel_de_rmw(dev_priv, DBUF_CTL_S(slice), 5367 DBUF_TRACKER_STATE_SERVICE_MASK, 5368 DBUF_TRACKER_STATE_SERVICE(8)); 5369 } 5370 5371 static void icl_mbus_init(struct drm_i915_private *dev_priv) 5372 { 5373 unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask; 5374 u32 mask, val, i; 5375 5376 if (IS_ALDERLAKE_P(dev_priv)) 5377 return; 5378 5379 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | 5380 MBUS_ABOX_BT_CREDIT_POOL2_MASK | 5381 MBUS_ABOX_B_CREDIT_MASK | 5382 MBUS_ABOX_BW_CREDIT_MASK; 5383 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 5384 MBUS_ABOX_BT_CREDIT_POOL2(16) | 5385 MBUS_ABOX_B_CREDIT(1) | 5386 MBUS_ABOX_BW_CREDIT(1); 5387 5388 /* 5389 * gen12 platforms that use abox1 and abox2 for pixel data reads still 5390 * expect us to program the abox_ctl0 register as well, even though 5391 * we don't have to program other instance-0 registers like BW_BUDDY. 5392 */ 5393 if (DISPLAY_VER(dev_priv) == 12) 5394 abox_regs |= BIT(0); 5395 5396 for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) 5397 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val); 5398 } 5399 5400 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 5401 { 5402 u32 val = intel_de_read(dev_priv, LCPLL_CTL); 5403 5404 /* 5405 * The LCPLL register should be turned on by the BIOS. For now 5406 * let's just check its state and print errors in case 5407 * something is wrong. Don't even try to turn it on. 
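 * (The checks below catch FCLK being selected as the CD clock source,
 * the PLL being disabled, and an SSC reference being used, all of which
 * would point at bogus BIOS programming.)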
5408 */ 5409 5410 if (val & LCPLL_CD_SOURCE_FCLK) 5411 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); 5412 5413 if (val & LCPLL_PLL_DISABLE) 5414 drm_err(&dev_priv->drm, "LCPLL is disabled\n"); 5415 5416 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 5417 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); 5418 } 5419 5420 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 5421 { 5422 struct drm_device *dev = &dev_priv->drm; 5423 struct intel_crtc *crtc; 5424 5425 for_each_intel_crtc(dev, crtc) 5426 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 5427 pipe_name(crtc->pipe)); 5428 5429 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), 5430 "Display power well on\n"); 5431 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, 5432 "SPLL enabled\n"); 5433 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 5434 "WRPLL1 enabled\n"); 5435 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 5436 "WRPLL2 enabled\n"); 5437 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, 5438 "Panel power on\n"); 5439 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 5440 "CPU PWM1 enabled\n"); 5441 if (IS_HASWELL(dev_priv)) 5442 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 5443 "CPU PWM2 enabled\n"); 5444 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 5445 "PCH PWM1 enabled\n"); 5446 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 5447 "Utility pin enabled\n"); 5448 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, 5449 "PCH GTC enabled\n"); 5450 5451 /* 5452 * In theory we can still leave IRQs enabled, as long as only the HPD 5453 * interrupts remain enabled. We used to check for that, but since it's 5454 * gen-specific and since we only disable LCPLL after we fully disable 5455 * the interrupts, the check below should be enough. 5456 */ 5457 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 5458 } 5459 5460 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 5461 { 5462 if (IS_HASWELL(dev_priv)) 5463 return intel_de_read(dev_priv, D_COMP_HSW); 5464 else 5465 return intel_de_read(dev_priv, D_COMP_BDW); 5466 } 5467 5468 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 5469 { 5470 if (IS_HASWELL(dev_priv)) { 5471 if (sandybridge_pcode_write(dev_priv, 5472 GEN6_PCODE_WRITE_D_COMP, val)) 5473 drm_dbg_kms(&dev_priv->drm, 5474 "Failed to write to D_COMP\n"); 5475 } else { 5476 intel_de_write(dev_priv, D_COMP_BDW, val); 5477 intel_de_posting_read(dev_priv, D_COMP_BDW); 5478 } 5479 } 5480 5481 /* 5482 * This function implements pieces of two sequences from BSpec: 5483 * - Sequence for display software to disable LCPLL 5484 * - Sequence for display software to allow package C8+ 5485 * The steps implemented here are just the steps that actually touch the LCPLL 5486 * register. Callers should take care of disabling all the display engine 5487 * functions, doing the mode unset, fixing interrupts, etc. 
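 * In order, the LCPLL-touching steps below are: optionally redirect the
 * CD clock to Fclk, set LCPLL_PLL_DISABLE and wait for the PLL to unlock,
 * disable D_COMP, and finally (if asked to) allow LCPLL power down.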
5488 */ 5489 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 5490 bool switch_to_fclk, bool allow_power_down) 5491 { 5492 u32 val; 5493 5494 assert_can_disable_lcpll(dev_priv); 5495 5496 val = intel_de_read(dev_priv, LCPLL_CTL); 5497 5498 if (switch_to_fclk) { 5499 val |= LCPLL_CD_SOURCE_FCLK; 5500 intel_de_write(dev_priv, LCPLL_CTL, val); 5501 5502 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & 5503 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 5504 drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); 5505 5506 val = intel_de_read(dev_priv, LCPLL_CTL); 5507 } 5508 5509 val |= LCPLL_PLL_DISABLE; 5510 intel_de_write(dev_priv, LCPLL_CTL, val); 5511 intel_de_posting_read(dev_priv, LCPLL_CTL); 5512 5513 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 5514 drm_err(&dev_priv->drm, "LCPLL still locked\n"); 5515 5516 val = hsw_read_dcomp(dev_priv); 5517 val |= D_COMP_COMP_DISABLE; 5518 hsw_write_dcomp(dev_priv, val); 5519 ndelay(100); 5520 5521 if (wait_for((hsw_read_dcomp(dev_priv) & 5522 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 5523 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); 5524 5525 if (allow_power_down) { 5526 val = intel_de_read(dev_priv, LCPLL_CTL); 5527 val |= LCPLL_POWER_DOWN_ALLOW; 5528 intel_de_write(dev_priv, LCPLL_CTL, val); 5529 intel_de_posting_read(dev_priv, LCPLL_CTL); 5530 } 5531 } 5532 5533 /* 5534 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 5535 * source. 5536 */ 5537 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 5538 { 5539 u32 val; 5540 5541 val = intel_de_read(dev_priv, LCPLL_CTL); 5542 5543 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 5544 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 5545 return; 5546 5547 /* 5548 * Make sure we're not on PC8 state before disabling PC8, otherwise 5549 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 5550 */ 5551 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 5552 5553 if (val & LCPLL_POWER_DOWN_ALLOW) { 5554 val &= ~LCPLL_POWER_DOWN_ALLOW; 5555 intel_de_write(dev_priv, LCPLL_CTL, val); 5556 intel_de_posting_read(dev_priv, LCPLL_CTL); 5557 } 5558 5559 val = hsw_read_dcomp(dev_priv); 5560 val |= D_COMP_COMP_FORCE; 5561 val &= ~D_COMP_COMP_DISABLE; 5562 hsw_write_dcomp(dev_priv, val); 5563 5564 val = intel_de_read(dev_priv, LCPLL_CTL); 5565 val &= ~LCPLL_PLL_DISABLE; 5566 intel_de_write(dev_priv, LCPLL_CTL, val); 5567 5568 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 5569 drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); 5570 5571 if (val & LCPLL_CD_SOURCE_FCLK) { 5572 val = intel_de_read(dev_priv, LCPLL_CTL); 5573 val &= ~LCPLL_CD_SOURCE_FCLK; 5574 intel_de_write(dev_priv, LCPLL_CTL, val); 5575 5576 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & 5577 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 5578 drm_err(&dev_priv->drm, 5579 "Switching back to LCPLL failed\n"); 5580 } 5581 5582 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 5583 5584 intel_update_cdclk(dev_priv); 5585 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); 5586 } 5587 5588 /* 5589 * Package states C8 and deeper are really deep PC states that can only be 5590 * reached when all the devices on the system allow it, so even if the graphics 5591 * device allows PC8+, it doesn't mean the system will actually get to these 5592 * states. Our driver only allows PC8+ when going into runtime PM. 
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we
 * lose the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens
 * even if we don't put the device in PCI D3 state (which is what currently
 * happens because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" in the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}

static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}
}

static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits, val;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	val = intel_de_read(dev_priv, reg);

	if (enable)
		val |= reset_bits;
	else
		val &= ~reset_bits;

	intel_de_write(dev_priv, reg, val);
}
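/*
 * Illustrative sketch (not part of the driver flow): the handshake helper
 * above hides the per-platform register difference (GEN7_MSG_CTL on IVB vs.
 * HSW_NDE_RSTWRN_OPT elsewhere), so the core init paths below can simply
 * request the handshake based on whether a PCH is present:
 *
 *	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
 *
 * BXT is the exception and passes false unconditionally, see
 * bxt_display_core_init().
 */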
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);
}
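/*
 * Illustrative sketch (not part of the driver flow): the core init/uninit
 * paths above and below all follow the same locked lookup/toggle pattern for
 * driver-owned power well requests:
 *
 *	mutex_lock(&power_domains->lock);
 *	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
 *	intel_power_well_enable(dev_priv, well);
 *	mutex_unlock(&power_domains->lock);
 *
 * The lock only serializes the request bookkeeping; the DMC firmware may
 * still keep a well enabled after the driver drops its own request.
 */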
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}

struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};

static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
	int config, i;

	/* BW_BUDDY registers are not used on dGPUs beyond DG1 */
	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
		/* Wa_1409767108:tgl,dg1,adl-s */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
			if (DISPLAY_VER(dev_priv) == 12)
				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
					     BW_BUDDY_TLB_REQ_TIMER_MASK,
					     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}
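/*
 * Worked example (values straight from tgl_buddy_page_masks above): a
 * 2-channel DDR4 system matches the { 2, INTEL_DRAM_DDR4, 0x1F } entry, so
 * every ABOX instance gets BW_BUDDY_PAGE_MASK set to 0x1F. A configuration
 * with no table match (the page_mask == 0 terminator is reached) instead
 * writes BW_BUDDY_DISABLE to each BW_BUDDY_CTL register.
 */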
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 12)
		gen12_dbuf_slices_config(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (IS_DG2(dev_priv))
		intel_snps_phy_wait_for_calibration(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
	if (DISPLAY_VER(dev_priv) >= 12) {
		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
	}

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(dev_priv) >= 13)
		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}

static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Uninit all combo phys */
	intel_combo_phy_uninit(dev_priv);
}
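/*
 * Illustrative summary (restating the two functions above, not BSpec): the
 * ICL+ uninit sequence is the init sequence run backwards, minus the steps
 * that need no undo:
 *
 *	init:   handshake -> combo phy -> PG1 -> CDCLK -> DBUF -> MBUS -> BW_BUDDY
 *	uninit: DBUF -> CDCLK -> PG1 -> combo phy
 */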
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}
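/*
 * Worked example of the lane status handling above: if DPLL(PIPE_A) reports
 * only two of channel B's four lanes ready (mask 0x3 rather than 0xf), the
 * power down override is enabled and the 0x3 lane mask is propagated into
 * chv_phy_control via PHY_CH_POWER_DOWN_OVRD(). Only a fully ready channel
 * (mask 0xf) leaves the override disabled with all power down bits cleared.
 */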
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}

static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
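/*
 * Illustrative note on vlv_cmnlane_wa() above: the common lane power well is
 * only disabled there; re-enabling it (and thereby completing the
 * assert/de-assert cycle described in the VBIOS notes) happens later, on
 * demand, when a display power domain that needs the PHY takes a reference
 * on VLV_DISP_PW_DPIO_CMN_BC.
 */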
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (DISPLAY_VER(i915) == 9) {
		skl_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/* Disable power well support if the user asked for it. */
	if (!i915->params.disable_power_well) {
		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
		i915->power_domains.disable_wakeref = intel_display_power_get(i915,
									      POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}

/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the on-demand enabling/disabling of the display power wells. Note
 * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
 * toggled only at specific points of the display modeset sequence, thus they
 * are not affected by the intel_power_domains_enable()/disable() calls. The
 * purpose of these functions is to keep the rest of the power wells enabled
 * until the end of display HW readout (which will acquire the power
 * references reflecting the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the on-demand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}
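/*
 * Illustrative sketch of the expected pairing between the entry points above
 * and below (derived from their kerneldoc, not a literal call site):
 *
 *	intel_power_domains_init_hw(i915, false);
 *	...display HW readout...
 *	intel_power_domains_enable(i915);
 *	...normal operation...
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, mode);	(or _driver_remove())
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);
 */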
/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    intel_dmc_has_payload(i915)) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	if (DISPLAY_VER(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
		bxt_display_core_uninit(i915);
	else if (DISPLAY_VER(i915) == 9)
		skl_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}
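/*
 * Illustrative note on the early return above: on a DMC platform without DC9
 * support, a suspend-to-idle leaves the display core initialized so the DMC
 * firmware keeps running. In that case display_core_suspended stays false
 * and intel_power_domains_resume() below only re-takes the INIT wakeref
 * instead of re-running the full core init.
 */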
/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
		power_domains->init_wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg(&i915->drm, "%-25s %d\n",
			power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			drm_dbg(&i915->drm, "  %-23s %d\n",
				intel_display_power_domain_str(domain),
				power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			drm_err(&i915->drm,
				"power well %s state mismatch (refcount %d/enabled %d)",
				power_well->desc->name,
				power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			drm_err(&i915->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				power_well->desc->name, power_well->count,
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif
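/*
 * Illustrative example of the invariant checked by
 * intel_power_domains_verify_state() above: a power well spanning the
 * domains {PIPE_A, TRANSCODER_A} with domain_use_count[PIPE_A] == 1 and
 * domain_use_count[TRANSCODER_A] == 1 must have power_well->count == 2, and
 * (unless it is always_on) must read back as enabled from its
 * ->is_enabled() hook.
 */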
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}

void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}

void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

void intel_display_power_resume(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		bxt_disable_dc9(i915);
		icl_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915)) {
			if (i915->dmc.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(i915);
			else if (i915->dmc.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(i915);
		}
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_disable_dc9(i915);
		bxt_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915) &&
		    (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}

void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);
}
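/*
 * Illustrative (hypothetical) intel_display_power_debug() output, showing
 * the column layout produced by the format strings above; the actual well
 * and domain names depend on the platform:
 *
 *	Power well/domain         Use count
 *	always-on                 0
 *	power well 1              1
 *	  PIPE_A                  1
 *	  TRANSCODER_A            1
 */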