/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "display/intel_crt.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_display_power.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_G_LANES:
		return "PORT_DDI_G_LANES";
	case POWER_DOMAIN_PORT_DDI_H_LANES:
		return "PORT_DDI_H_LANES";
	case POWER_DOMAIN_PORT_DDI_I_LANES:
		return "PORT_DDI_I_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_G_IO:
		return "PORT_DDI_G_IO";
	case POWER_DOMAIN_PORT_DDI_H_IO:
		return "PORT_DDI_H_IO";
	case POWER_DOMAIN_PORT_DDI_I_IO:
		return "PORT_DDI_I_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
return "AUX_E"; 121 case POWER_DOMAIN_AUX_F: 122 return "AUX_F"; 123 case POWER_DOMAIN_AUX_G: 124 return "AUX_G"; 125 case POWER_DOMAIN_AUX_H: 126 return "AUX_H"; 127 case POWER_DOMAIN_AUX_I: 128 return "AUX_I"; 129 case POWER_DOMAIN_AUX_IO_A: 130 return "AUX_IO_A"; 131 case POWER_DOMAIN_AUX_C_TBT: 132 return "AUX_C_TBT"; 133 case POWER_DOMAIN_AUX_D_TBT: 134 return "AUX_D_TBT"; 135 case POWER_DOMAIN_AUX_E_TBT: 136 return "AUX_E_TBT"; 137 case POWER_DOMAIN_AUX_F_TBT: 138 return "AUX_F_TBT"; 139 case POWER_DOMAIN_AUX_G_TBT: 140 return "AUX_G_TBT"; 141 case POWER_DOMAIN_AUX_H_TBT: 142 return "AUX_H_TBT"; 143 case POWER_DOMAIN_AUX_I_TBT: 144 return "AUX_I_TBT"; 145 case POWER_DOMAIN_GMBUS: 146 return "GMBUS"; 147 case POWER_DOMAIN_INIT: 148 return "INIT"; 149 case POWER_DOMAIN_MODESET: 150 return "MODESET"; 151 case POWER_DOMAIN_GT_IRQ: 152 return "GT_IRQ"; 153 case POWER_DOMAIN_DPLL_DC_OFF: 154 return "DPLL_DC_OFF"; 155 case POWER_DOMAIN_TC_COLD_OFF: 156 return "TC_COLD_OFF"; 157 default: 158 MISSING_CASE(domain); 159 return "?"; 160 } 161 } 162 163 static void intel_power_well_enable(struct drm_i915_private *dev_priv, 164 struct i915_power_well *power_well) 165 { 166 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name); 167 power_well->desc->ops->enable(dev_priv, power_well); 168 power_well->hw_enabled = true; 169 } 170 171 static void intel_power_well_disable(struct drm_i915_private *dev_priv, 172 struct i915_power_well *power_well) 173 { 174 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name); 175 power_well->hw_enabled = false; 176 power_well->desc->ops->disable(dev_priv, power_well); 177 } 178 179 static void intel_power_well_get(struct drm_i915_private *dev_priv, 180 struct i915_power_well *power_well) 181 { 182 if (!power_well->count++) 183 intel_power_well_enable(dev_priv, power_well); 184 } 185 186 static void intel_power_well_put(struct drm_i915_private *dev_priv, 187 struct i915_power_well *power_well) 188 { 189 drm_WARN(&dev_priv->drm, !power_well->count, 190 "Use count on power well %s is already zero", 191 power_well->desc->name); 192 193 if (!--power_well->count) 194 intel_power_well_disable(dev_priv, power_well); 195 } 196 197 /** 198 * __intel_display_power_is_enabled - unlocked check for a power domain 199 * @dev_priv: i915 device instance 200 * @domain: power domain to check 201 * 202 * This is the unlocked version of intel_display_power_is_enabled() and should 203 * only be used from error capture and recovery code where deadlocks are 204 * possible. 205 * 206 * Returns: 207 * True when the power domain is enabled, false otherwise. 208 */ 209 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 210 enum intel_display_power_domain domain) 211 { 212 struct i915_power_well *power_well; 213 bool is_enabled; 214 215 if (dev_priv->runtime_pm.suspended) 216 return false; 217 218 is_enabled = true; 219 220 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) { 221 if (power_well->desc->always_on) 222 continue; 223 224 if (!power_well->hw_enabled) { 225 is_enabled = false; 226 break; 227 } 228 } 229 230 return is_enabled; 231 } 232 233 /** 234 * intel_display_power_is_enabled - check for a power domain 235 * @dev_priv: i915 device instance 236 * @domain: power domain to check 237 * 238 * This function can be used to check the hw power domain state. It is mostly 239 * used in hardware state readout functions. 
/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
						 ICL_AUX_PW_TO_CH(pw_idx);
}

static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
		       enum aux_ch aux_ch)
{
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);
		if (!dig_port)
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	return dig_port;
}

static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
				  const struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);

	return intel_port_to_phy(i915, dig_port->base.port);
}

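/*
 * A worked example of the index arithmetic above, assuming the usual
 * contiguous enum layout: for the AUX B power well pw_idx is
 * ICL_PW_CTL_IDX_AUX_A + 1, so ICL_AUX_PW_TO_CH() yields AUX_CH_B, while
 * for the first TBT AUX well pw_idx is ICL_PW_CTL_IDX_AUX_TBT1, which
 * ICL_TBT_AUX_PW_TO_CH() maps to AUX_CH_C, the first TC AUX channel.
 */
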
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
			    power_well->desc->name);

		drm_WARN_ON(&dev_priv->drm, !timeout_expected);
	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;

	return ret;
}

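/*
 * The value built above is a 4-bit requester bitmask: bit 0 = BIOS,
 * bit 1 = driver, bit 2 = KVMr, bit 3 = DEBUG. E.g. a return value of
 * 0x5 means both the BIOS and KVMr request registers have this well's
 * REQ bit set; see the decode in hsw_wait_for_power_well_disable().
 */
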
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    power_well->desc->name,
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		u32 val;

		val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(dev_priv, pg);
	}

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (DISPLAY_VER(dev_priv) < 12) {
		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
			       val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
		       val & ~ICL_LANE_ENABLE_AUX);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

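/*
 * The block below is only built with CONFIG_DRM_I915_DEBUG_RUNTIME_PM:
 * it cross-checks that a TC AUX power well is only toggled while the
 * corresponding TypeC port holds its PHY ownership, except for
 * references that are being released asynchronously.
 */
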
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64
async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);

	return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
		return;

	if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
		return;

	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void icl_tc_cold_exit(struct drm_i915_private *i915)
{
	int ret, tries = 0;

	while (1) {
		ret = sandybridge_pcode_write_timeout(i915,
						      ICL_PCODE_EXIT_TCCOLD,
						      0, 250, 1);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		msleep(1);
	}

	/* Spec states that TC cold exit can take up to 1ms to complete */
	if (!ret)
		msleep(1);

	/* TODO: turn failure into an error as soon as i915 CI updates the ICL IFWI */
	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
		    "succeeded");
}

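/*
 * To summarize the exit protocol above: the PCODE request is retried up
 * to three times while PCODE keeps answering -EAGAIN, and on success we
 * sleep a further 1ms, since per the spec TC cold exit can take that
 * long to actually complete.
 */
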
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
	bool timeout_expected;
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (is_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));

	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down, or when we
	 * need to enable AUX on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
	if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
		icl_tc_cold_exit(dev_priv);

	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);

	if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
			       HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting for TC uC health\n");
	}
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	hsw_power_well_disable(dev_priv, power_well);
}

static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
			  struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_enable(dev_priv,
							   power_well);
	else
		return hsw_power_well_enable(dev_priv, power_well);
}

static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_disable(dev_priv,
							    power_well);
	else
		return hsw_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(dev_priv, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(dev_priv, regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(dev_priv, DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning the old value. Make sure
	 * the write really sticks enough times and also force a rewrite
	 * until we are confident that the state is exactly what we want.
	 */
	do {
		v = intel_de_read(dev_priv, DC_STATE_EN);

		if (v != state) {
			intel_de_write(dev_priv, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(&dev_priv->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the time we need one retry, avoid spam */
	if (rewrites > 1)
		drm_dbg_kms(&dev_priv->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (DISPLAY_VER(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
					  | DC_STATE_EN_DC9;
	else if (DISPLAY_VER(dev_priv) == 11)
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_DISPLAY(dev_priv))
		return;

	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);

	drm_dbg_kms(&dev_priv->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    dev_priv->dmc.dc_state, val);
	dev_priv->dmc.dc_state = val;
}

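/*
 * For reference, gen9_dc_mask() above evaluates per platform (following
 * the version checks in that function) to:
 *   GEN9 big core:	DC5 | DC6
 *   BXT/GLK:		DC5 | DC9
 *   DISPLAY_VER 11:	DC5 | DC6 | DC9
 *   DISPLAY_VER 12+:	DC5 | DC6 | DC9 | DC3CO
 */
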
/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * requesting it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     state & ~dev_priv->dmc.allowed_dc_mask))
		state &= dev_priv->dmc.allowed_dc_mask;

	val = intel_de_read(dev_priv, DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->dmc.dc_state)
		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			dev_priv->dmc.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->dmc.dc_state = val & mask;
}

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}

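/*
 * sanitize_target_dc_state() walks the states[] list above in order, so
 * e.g. a requested DC_STATE_EN_UPTO_DC6 with only DC5 in allowed_dc_mask
 * is demoted to DC_STATE_EN_UPTO_DC5, and all the way down to
 * DC_STATE_DISABLE if nothing deeper is allowed.
 */
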
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
	val = intel_de_read(dev_priv, DC_STATE_EN);
	val &= ~DC_STATE_DC3CO_STATUS;
	intel_de_write(dev_priv, DC_STATE_EN, val);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Delay of 200us DC3CO Exit time B.Spec 49196
	 */
	usleep_range(200, 210);
}

static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_pps_reset_all(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      !intel_de_read(dev_priv,
				     DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
		      "DMC program storage start is NULL\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
		      "DMC SSP Base Not fine\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
		      "DMC HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	drm_WARN(&dev_priv->drm, 1,
		 "Power well %d not defined for this platform\n",
		 power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well's target_dc_state; based on
 * this target_dc_state, the "DC off" power well will enable the desired
 * DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->dmc.target_dc_state)
		goto unlock;

	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
							   power_well);
	/*
	 * If the DC off power well is disabled, we need to enable and then
	 * disable it to put the new target DC state into effect.
	 */
	if (!dc_off_enabled)
		power_well->desc->ops->enable(dev_priv, power_well);

	dev_priv->dmc.target_dc_state = state;

	if (!dc_off_enabled)
		power_well->desc->ops->disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (DISPLAY_VER(dev_priv) == 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(&dev_priv->drm,
		      intel_display_power_well_is_enabled(dev_priv, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_dmc_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		      "Backlight is not disabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_dmc_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(dev_priv, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(dev_priv, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(dev_priv, regs->driver, drv_req | mask);
		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
	}
}

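/*
 * A sketch of the handover above: if the BIOS left its REQ bit set for
 * this well, mirror it into the driver's request register first (so the
 * well never drops) and only then clear the BIOS request, leaving the
 * driver as the sole requester from here on.
 */
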
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;

	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
}

static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = {};

	if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	if (!HAS_DISPLAY(dev_priv))
		return;

	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!intel_dmc_has_payload(dev_priv))
		return;

	switch (dev_priv->dmc.target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

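/*
 * Note the ordering in vlv_set_power_well() above: the punit status
 * register is polled first, so a request that is already in effect
 * (e.g. during sync_hw on boot) returns early without touching
 * PUNIT_REG_PWRGT_CTRL at all.
 */
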
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	intel_de_write(dev_priv, DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);
	intel_hpd_poll_disable(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_pps_reset_all(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_enable(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *	 be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

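/*
 * BITS_SET() checks that _all_ of the given bits are set, e.g.
 * BITS_SET(0xf0, 0x30) is true while BITS_SET(0x40, 0x30) is not;
 * assert_chv_phy_status() below relies on this to test multi-lane
 * override masks in one go.
 */
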
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		drm_err(&dev_priv->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d did not power up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

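/*
 * Once a PHY has been fully reset at least once, chv_phy_assert[] is set
 * above and the status checks are armed; assert_chv_phy_powergate() below
 * therefore skips verification for a PHY the BIOS may have left in a
 * partially powered state.
 */
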
1770 */ 1771 if (ch == DPIO_CH1 && val == 0) 1772 expected = 0; 1773 } else if (mask != 0x0) { 1774 expected = DPIO_ANYDL_POWERDOWN; 1775 } else { 1776 expected = 0; 1777 } 1778 1779 if (ch == DPIO_CH0) 1780 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; 1781 else 1782 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; 1783 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1784 1785 drm_WARN(&dev_priv->drm, actual != expected, 1786 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", 1787 !!(actual & DPIO_ALLDL_POWERDOWN), 1788 !!(actual & DPIO_ANYDL_POWERDOWN), 1789 !!(expected & DPIO_ALLDL_POWERDOWN), 1790 !!(expected & DPIO_ANYDL_POWERDOWN), 1791 reg, val); 1792 } 1793 1794 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1795 enum dpio_channel ch, bool override) 1796 { 1797 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1798 bool was_override; 1799 1800 mutex_lock(&power_domains->lock); 1801 1802 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1803 1804 if (override == was_override) 1805 goto out; 1806 1807 if (override) 1808 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1809 else 1810 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1811 1812 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1813 dev_priv->chv_phy_control); 1814 1815 drm_dbg_kms(&dev_priv->drm, 1816 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", 1817 phy, ch, dev_priv->chv_phy_control); 1818 1819 assert_chv_phy_status(dev_priv); 1820 1821 out: 1822 mutex_unlock(&power_domains->lock); 1823 1824 return was_override; 1825 } 1826 1827 void chv_phy_powergate_lanes(struct intel_encoder *encoder, 1828 bool override, unsigned int mask) 1829 { 1830 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1831 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1832 enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); 1833 enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); 1834 1835 mutex_lock(&power_domains->lock); 1836 1837 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); 1838 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); 1839 1840 if (override) 1841 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1842 else 1843 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1844 1845 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1846 dev_priv->chv_phy_control); 1847 1848 drm_dbg_kms(&dev_priv->drm, 1849 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", 1850 phy, ch, mask, dev_priv->chv_phy_control); 1851 1852 assert_chv_phy_status(dev_priv); 1853 1854 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); 1855 1856 mutex_unlock(&power_domains->lock); 1857 } 1858 1859 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, 1860 struct i915_power_well *power_well) 1861 { 1862 enum pipe pipe = PIPE_A; 1863 bool enabled; 1864 u32 state, ctrl; 1865 1866 vlv_punit_get(dev_priv); 1867 1868 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); 1869 /* 1870 * We only ever set the power-on and power-gate states, anything 1871 * else is unexpected. 
1872 */ 1873 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && 1874 state != DP_SSS_PWR_GATE(pipe)); 1875 enabled = state == DP_SSS_PWR_ON(pipe); 1876 1877 /* 1878 * A transient state at this point would mean some unexpected party 1879 * is poking at the power controls too. 1880 */ 1881 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); 1882 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); 1883 1884 vlv_punit_put(dev_priv); 1885 1886 return enabled; 1887 } 1888 1889 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, 1890 struct i915_power_well *power_well, 1891 bool enable) 1892 { 1893 enum pipe pipe = PIPE_A; 1894 u32 state; 1895 u32 ctrl; 1896 1897 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); 1898 1899 vlv_punit_get(dev_priv); 1900 1901 #define COND \ 1902 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) 1903 1904 if (COND) 1905 goto out; 1906 1907 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 1908 ctrl &= ~DP_SSC_MASK(pipe); 1909 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); 1910 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); 1911 1912 if (wait_for(COND, 100)) 1913 drm_err(&dev_priv->drm, 1914 "timeout setting power well state %08x (%08x)\n", 1915 state, 1916 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); 1917 1918 #undef COND 1919 1920 out: 1921 vlv_punit_put(dev_priv); 1922 } 1923 1924 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, 1925 struct i915_power_well *power_well) 1926 { 1927 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1928 dev_priv->chv_phy_control); 1929 } 1930 1931 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, 1932 struct i915_power_well *power_well) 1933 { 1934 chv_set_pipe_power_well(dev_priv, power_well, true); 1935 1936 vlv_display_power_well_init(dev_priv); 1937 } 1938 1939 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, 1940 struct i915_power_well *power_well) 1941 { 1942 vlv_display_power_well_deinit(dev_priv); 1943 1944 chv_set_pipe_power_well(dev_priv, power_well, false); 1945 } 1946 1947 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) 1948 { 1949 return power_domains->async_put_domains[0] | 1950 power_domains->async_put_domains[1]; 1951 } 1952 1953 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 1954 1955 static bool 1956 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 1957 { 1958 struct drm_i915_private *i915 = container_of(power_domains, 1959 struct drm_i915_private, 1960 power_domains); 1961 return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] & 1962 power_domains->async_put_domains[1]); 1963 } 1964 1965 static bool 1966 __async_put_domains_state_ok(struct i915_power_domains *power_domains) 1967 { 1968 struct drm_i915_private *i915 = container_of(power_domains, 1969 struct drm_i915_private, 1970 power_domains); 1971 enum intel_display_power_domain domain; 1972 bool err = false; 1973 1974 err |= !assert_async_put_domain_masks_disjoint(power_domains); 1975 err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref != 1976 !!__async_put_domains_mask(power_domains)); 1977 1978 for_each_power_domain(domain, __async_put_domains_mask(power_domains)) 1979 err |= drm_WARN_ON(&i915->drm, 1980 power_domains->domain_use_count[domain] != 1); 1981 1982 return !err; 1983 } 1984 1985 static void print_power_domains(struct i915_power_domains *power_domains, 1986 const char *prefix, u64 
mask) 1987 { 1988 struct drm_i915_private *i915 = container_of(power_domains, 1989 struct drm_i915_private, 1990 power_domains); 1991 enum intel_display_power_domain domain; 1992 1993 drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask)); 1994 for_each_power_domain(domain, mask) 1995 drm_dbg(&i915->drm, "%s use_count %d\n", 1996 intel_display_power_domain_str(domain), 1997 power_domains->domain_use_count[domain]); 1998 } 1999 2000 static void 2001 print_async_put_domains_state(struct i915_power_domains *power_domains) 2002 { 2003 struct drm_i915_private *i915 = container_of(power_domains, 2004 struct drm_i915_private, 2005 power_domains); 2006 2007 drm_dbg(&i915->drm, "async_put_wakeref %u\n", 2008 power_domains->async_put_wakeref); 2009 2010 print_power_domains(power_domains, "async_put_domains[0]", 2011 power_domains->async_put_domains[0]); 2012 print_power_domains(power_domains, "async_put_domains[1]", 2013 power_domains->async_put_domains[1]); 2014 } 2015 2016 static void 2017 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2018 { 2019 if (!__async_put_domains_state_ok(power_domains)) 2020 print_async_put_domains_state(power_domains); 2021 } 2022 2023 #else 2024 2025 static void 2026 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 2027 { 2028 } 2029 2030 static void 2031 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2032 { 2033 } 2034 2035 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ 2036 2037 static u64 async_put_domains_mask(struct i915_power_domains *power_domains) 2038 { 2039 assert_async_put_domain_masks_disjoint(power_domains); 2040 2041 return __async_put_domains_mask(power_domains); 2042 } 2043 2044 static void 2045 async_put_domains_clear_domain(struct i915_power_domains *power_domains, 2046 enum intel_display_power_domain domain) 2047 { 2048 assert_async_put_domain_masks_disjoint(power_domains); 2049 2050 power_domains->async_put_domains[0] &= ~BIT_ULL(domain); 2051 power_domains->async_put_domains[1] &= ~BIT_ULL(domain); 2052 } 2053 2054 static bool 2055 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, 2056 enum intel_display_power_domain domain) 2057 { 2058 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2059 bool ret = false; 2060 2061 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) 2062 goto out_verify; 2063 2064 async_put_domains_clear_domain(power_domains, domain); 2065 2066 ret = true; 2067 2068 if (async_put_domains_mask(power_domains)) 2069 goto out_verify; 2070 2071 cancel_delayed_work(&power_domains->async_put_work); 2072 intel_runtime_pm_put_raw(&dev_priv->runtime_pm, 2073 fetch_and_zero(&power_domains->async_put_wakeref)); 2074 out_verify: 2075 verify_async_put_domains_state(power_domains); 2076 2077 return ret; 2078 } 2079 2080 static void 2081 __intel_display_power_get_domain(struct drm_i915_private *dev_priv, 2082 enum intel_display_power_domain domain) 2083 { 2084 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2085 struct i915_power_well *power_well; 2086 2087 if (intel_display_power_grab_async_put_ref(dev_priv, domain)) 2088 return; 2089 2090 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) 2091 intel_power_well_get(dev_priv, power_well); 2092 2093 power_domains->domain_use_count[domain]++; 2094 } 2095 2096 /** 2097 * intel_display_power_get - grab a power domain reference 2098 * @dev_priv: i915 device instance 2099 * @domain: power domain to reference 2100 * 2101 * 
This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if @domain is
 * already enabled, and keeps it (and all its parents) powered up for as long
 * as the reference is held. Users should only grab a reference to the
 * innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns: a wakeref on success, 0 if the power domain was not enabled.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return 0;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}

static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);

	power_domains = &dev_priv->power_domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	drm_WARN(&dev_priv->drm,
		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}

static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
						    &power_domains->async_put_work,
						    msecs_to_jiffies(100)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must already hold a raw wakeref; upgrade that to a proper
	 * wakeref to make the state checker happy about the HW access during
	 * power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (power_domains->async_put_domains[1]) {
		power_domains->async_put_domains[0] =
			fetch_and_zero(&power_domains->async_put_domains[1]);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	} else {
		/*
		 * Cancel the work that got queued after this one got dequeued,
		 * since here we released the corresponding async-put reference.
		 */
		cancel_delayed_work(&power_domains->async_put_work);
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work item to power down the
 * corresponding hardware block if this is the last reference.
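 *
 * A minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUDIO);
 *	... program the hardware ...
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUDIO, wakeref);
 *
 * The domain then stays enabled for at least another 100ms (see
 * queue_async_put_domains_work() above), so a quickly following get on the
 * same domain avoids a needless power cycle of the hardware block.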
2297 */ 2298 void __intel_display_power_put_async(struct drm_i915_private *i915, 2299 enum intel_display_power_domain domain, 2300 intel_wakeref_t wakeref) 2301 { 2302 struct i915_power_domains *power_domains = &i915->power_domains; 2303 struct intel_runtime_pm *rpm = &i915->runtime_pm; 2304 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); 2305 2306 mutex_lock(&power_domains->lock); 2307 2308 if (power_domains->domain_use_count[domain] > 1) { 2309 __intel_display_power_put_domain(i915, domain); 2310 2311 goto out_verify; 2312 } 2313 2314 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); 2315 2316 /* Let a pending work requeue itself or queue a new one. */ 2317 if (power_domains->async_put_wakeref) { 2318 power_domains->async_put_domains[1] |= BIT_ULL(domain); 2319 } else { 2320 power_domains->async_put_domains[0] |= BIT_ULL(domain); 2321 queue_async_put_domains_work(power_domains, 2322 fetch_and_zero(&work_wakeref)); 2323 } 2324 2325 out_verify: 2326 verify_async_put_domains_state(power_domains); 2327 2328 mutex_unlock(&power_domains->lock); 2329 2330 if (work_wakeref) 2331 intel_runtime_pm_put_raw(rpm, work_wakeref); 2332 2333 intel_runtime_pm_put(rpm, wakeref); 2334 } 2335 2336 /** 2337 * intel_display_power_flush_work - flushes the async display power disabling work 2338 * @i915: i915 device instance 2339 * 2340 * Flushes any pending work that was scheduled by a preceding 2341 * intel_display_power_put_async() call, completing the disabling of the 2342 * corresponding power domains. 2343 * 2344 * Note that the work handler function may still be running after this 2345 * function returns; to ensure that the work handler isn't running use 2346 * intel_display_power_flush_work_sync() instead. 2347 */ 2348 void intel_display_power_flush_work(struct drm_i915_private *i915) 2349 { 2350 struct i915_power_domains *power_domains = &i915->power_domains; 2351 intel_wakeref_t work_wakeref; 2352 2353 mutex_lock(&power_domains->lock); 2354 2355 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2356 if (!work_wakeref) 2357 goto out_verify; 2358 2359 release_async_put_domains(power_domains, 2360 async_put_domains_mask(power_domains)); 2361 cancel_delayed_work(&power_domains->async_put_work); 2362 2363 out_verify: 2364 verify_async_put_domains_state(power_domains); 2365 2366 mutex_unlock(&power_domains->lock); 2367 2368 if (work_wakeref) 2369 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); 2370 } 2371 2372 /** 2373 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work 2374 * @i915: i915 device instance 2375 * 2376 * Like intel_display_power_flush_work(), but also ensure that the work 2377 * handler function is not running any more when this function returns. 
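 *
 * For example (an illustrative sketch, not code from this file), an internal
 * teardown path would call this after dropping its last asynchronous
 * reference:
 *
 *	intel_display_power_put_async(i915, domain, wakeref);
 *	...
 *	intel_display_power_flush_work_sync(i915);
 *
 * after which no async-put handler can still be touching the hardware.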
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress
 * wakeref tracking when the corresponding debug kconfig option is disabled and
 * should not be used otherwise.
2423 */ 2424 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 2425 enum intel_display_power_domain domain) 2426 { 2427 __intel_display_power_put(dev_priv, domain); 2428 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); 2429 } 2430 #endif 2431 2432 void 2433 intel_display_power_get_in_set(struct drm_i915_private *i915, 2434 struct intel_display_power_domain_set *power_domain_set, 2435 enum intel_display_power_domain domain) 2436 { 2437 intel_wakeref_t __maybe_unused wf; 2438 2439 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2440 2441 wf = intel_display_power_get(i915, domain); 2442 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2443 power_domain_set->wakerefs[domain] = wf; 2444 #endif 2445 power_domain_set->mask |= BIT_ULL(domain); 2446 } 2447 2448 bool 2449 intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, 2450 struct intel_display_power_domain_set *power_domain_set, 2451 enum intel_display_power_domain domain) 2452 { 2453 intel_wakeref_t wf; 2454 2455 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2456 2457 wf = intel_display_power_get_if_enabled(i915, domain); 2458 if (!wf) 2459 return false; 2460 2461 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2462 power_domain_set->wakerefs[domain] = wf; 2463 #endif 2464 power_domain_set->mask |= BIT_ULL(domain); 2465 2466 return true; 2467 } 2468 2469 void 2470 intel_display_power_put_mask_in_set(struct drm_i915_private *i915, 2471 struct intel_display_power_domain_set *power_domain_set, 2472 u64 mask) 2473 { 2474 enum intel_display_power_domain domain; 2475 2476 drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask); 2477 2478 for_each_power_domain(domain, mask) { 2479 intel_wakeref_t __maybe_unused wf = -1; 2480 2481 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2482 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); 2483 #endif 2484 intel_display_power_put(i915, domain, wf); 2485 power_domain_set->mask &= ~BIT_ULL(domain); 2486 } 2487 } 2488 2489 #define I830_PIPES_POWER_DOMAINS ( \ 2490 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2491 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2492 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2493 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2494 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2495 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2496 BIT_ULL(POWER_DOMAIN_INIT)) 2497 2498 #define VLV_DISPLAY_POWER_DOMAINS ( \ 2499 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2500 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2501 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2502 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2503 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2504 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2505 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2506 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2507 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2508 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2509 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2510 BIT_ULL(POWER_DOMAIN_VGA) | \ 2511 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2512 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2513 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2514 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2515 BIT_ULL(POWER_DOMAIN_INIT)) 2516 2517 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2518 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2519 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2520 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2521 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2522 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2523 BIT_ULL(POWER_DOMAIN_INIT)) 2524 2525 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ 2526 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2527 
BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2528 BIT_ULL(POWER_DOMAIN_INIT)) 2529 2530 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ 2531 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2532 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2533 BIT_ULL(POWER_DOMAIN_INIT)) 2534 2535 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ 2536 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2537 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2538 BIT_ULL(POWER_DOMAIN_INIT)) 2539 2540 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ 2541 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2542 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2543 BIT_ULL(POWER_DOMAIN_INIT)) 2544 2545 #define CHV_DISPLAY_POWER_DOMAINS ( \ 2546 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2547 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2548 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2549 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2550 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2551 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2552 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2553 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2554 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2555 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2556 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2557 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2558 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2559 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2560 BIT_ULL(POWER_DOMAIN_VGA) | \ 2561 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2562 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2563 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2564 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2565 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2566 BIT_ULL(POWER_DOMAIN_INIT)) 2567 2568 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2569 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2570 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2571 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2572 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2573 BIT_ULL(POWER_DOMAIN_INIT)) 2574 2575 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ 2576 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2577 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2578 BIT_ULL(POWER_DOMAIN_INIT)) 2579 2580 #define HSW_DISPLAY_POWER_DOMAINS ( \ 2581 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2582 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2583 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2584 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2585 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2586 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2587 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2588 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2589 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2590 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2591 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2592 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2593 BIT_ULL(POWER_DOMAIN_VGA) | \ 2594 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2595 BIT_ULL(POWER_DOMAIN_INIT)) 2596 2597 #define BDW_DISPLAY_POWER_DOMAINS ( \ 2598 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2599 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2600 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2601 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2602 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2603 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2604 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2605 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2606 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2607 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2608 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2609 BIT_ULL(POWER_DOMAIN_VGA) | \ 2610 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2611 BIT_ULL(POWER_DOMAIN_INIT)) 2612 2613 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2614 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2615 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2616 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2617 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2618 
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2619 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2620 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2621 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2622 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2623 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2624 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2625 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2626 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2627 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2628 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2629 BIT_ULL(POWER_DOMAIN_VGA) | \ 2630 BIT_ULL(POWER_DOMAIN_INIT)) 2631 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ 2632 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2633 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ 2634 BIT_ULL(POWER_DOMAIN_INIT)) 2635 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2636 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2637 BIT_ULL(POWER_DOMAIN_INIT)) 2638 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2639 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2640 BIT_ULL(POWER_DOMAIN_INIT)) 2641 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ 2642 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2643 BIT_ULL(POWER_DOMAIN_INIT)) 2644 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2645 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2646 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2647 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2648 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2649 BIT_ULL(POWER_DOMAIN_INIT)) 2650 2651 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2652 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2653 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2654 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2655 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2656 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2657 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2658 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2659 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2660 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2661 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2662 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2663 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2664 BIT_ULL(POWER_DOMAIN_VGA) | \ 2665 BIT_ULL(POWER_DOMAIN_INIT)) 2666 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2667 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2668 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2669 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2670 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2671 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2672 BIT_ULL(POWER_DOMAIN_INIT)) 2673 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ 2674 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2675 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2676 BIT_ULL(POWER_DOMAIN_INIT)) 2677 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ 2678 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2679 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2680 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2681 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2682 BIT_ULL(POWER_DOMAIN_INIT)) 2683 2684 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2685 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2686 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2687 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2688 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2689 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2690 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2691 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2692 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2693 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2694 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2695 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2696 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2697 BIT_ULL(POWER_DOMAIN_VGA) | \ 2698 BIT_ULL(POWER_DOMAIN_INIT)) 2699 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ 2700 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2701 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2702 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2703 #define 
GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2704 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2705 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ 2706 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2707 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2708 BIT_ULL(POWER_DOMAIN_INIT)) 2709 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ 2710 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2711 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2712 BIT_ULL(POWER_DOMAIN_INIT)) 2713 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ 2714 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2715 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2716 BIT_ULL(POWER_DOMAIN_INIT)) 2717 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2718 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2719 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2720 BIT_ULL(POWER_DOMAIN_INIT)) 2721 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2722 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2723 BIT_ULL(POWER_DOMAIN_INIT)) 2724 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2725 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2726 BIT_ULL(POWER_DOMAIN_INIT)) 2727 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2728 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2729 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2730 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2731 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2732 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2733 BIT_ULL(POWER_DOMAIN_INIT)) 2734 2735 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2736 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2737 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2738 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2739 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2740 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2741 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2742 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2743 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2744 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2745 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2746 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2747 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2748 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2749 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2750 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2751 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2752 BIT_ULL(POWER_DOMAIN_VGA) | \ 2753 BIT_ULL(POWER_DOMAIN_INIT)) 2754 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \ 2755 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2756 BIT_ULL(POWER_DOMAIN_INIT)) 2757 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \ 2758 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2759 BIT_ULL(POWER_DOMAIN_INIT)) 2760 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \ 2761 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2762 BIT_ULL(POWER_DOMAIN_INIT)) 2763 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \ 2764 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2765 BIT_ULL(POWER_DOMAIN_INIT)) 2766 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2767 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2768 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2769 BIT_ULL(POWER_DOMAIN_INIT)) 2770 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2771 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2772 BIT_ULL(POWER_DOMAIN_INIT)) 2773 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2774 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2775 BIT_ULL(POWER_DOMAIN_INIT)) 2776 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \ 2777 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2778 BIT_ULL(POWER_DOMAIN_INIT)) 2779 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \ 2780 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2781 BIT_ULL(POWER_DOMAIN_INIT)) 2782 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \ 2783 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \ 2784 BIT_ULL(POWER_DOMAIN_INIT)) 2785 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2786 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2787 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2788 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2789 
BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2790 BIT_ULL(POWER_DOMAIN_INIT)) 2791 2792 /* 2793 * ICL PW_0/PG_0 domains (HW/DMC control): 2794 * - PCI 2795 * - clocks except port PLL 2796 * - central power except FBC 2797 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers 2798 * ICL PW_1/PG_1 domains (HW/DMC control): 2799 * - DBUF function 2800 * - PIPE_A and its planes, except VGA 2801 * - transcoder EDP + PSR 2802 * - transcoder DSI 2803 * - DDI_A 2804 * - FBC 2805 */ 2806 #define ICL_PW_4_POWER_DOMAINS ( \ 2807 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2808 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2809 BIT_ULL(POWER_DOMAIN_INIT)) 2810 /* VDSC/joining */ 2811 #define ICL_PW_3_POWER_DOMAINS ( \ 2812 ICL_PW_4_POWER_DOMAINS | \ 2813 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2814 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2815 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2816 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2817 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2818 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2819 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2820 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2821 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2822 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2823 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2824 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2825 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2826 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2827 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2828 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ 2829 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2830 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2831 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2832 BIT_ULL(POWER_DOMAIN_VGA) | \ 2833 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2834 BIT_ULL(POWER_DOMAIN_INIT)) 2835 /* 2836 * - transcoder WD 2837 * - KVMR (HW control) 2838 */ 2839 #define ICL_PW_2_POWER_DOMAINS ( \ 2840 ICL_PW_3_POWER_DOMAINS | \ 2841 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2842 BIT_ULL(POWER_DOMAIN_INIT)) 2843 /* 2844 * - KVMR (HW control) 2845 */ 2846 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2847 ICL_PW_2_POWER_DOMAINS | \ 2848 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2849 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2850 BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \ 2851 BIT_ULL(POWER_DOMAIN_INIT)) 2852 2853 #define ICL_DDI_IO_A_POWER_DOMAINS ( \ 2854 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2855 #define ICL_DDI_IO_B_POWER_DOMAINS ( \ 2856 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2857 #define ICL_DDI_IO_C_POWER_DOMAINS ( \ 2858 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2859 #define ICL_DDI_IO_D_POWER_DOMAINS ( \ 2860 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2861 #define ICL_DDI_IO_E_POWER_DOMAINS ( \ 2862 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2863 #define ICL_DDI_IO_F_POWER_DOMAINS ( \ 2864 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2865 2866 #define ICL_AUX_A_IO_POWER_DOMAINS ( \ 2867 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2868 BIT_ULL(POWER_DOMAIN_AUX_A)) 2869 #define ICL_AUX_B_IO_POWER_DOMAINS ( \ 2870 BIT_ULL(POWER_DOMAIN_AUX_B)) 2871 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ 2872 BIT_ULL(POWER_DOMAIN_AUX_C)) 2873 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ 2874 BIT_ULL(POWER_DOMAIN_AUX_D)) 2875 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ 2876 BIT_ULL(POWER_DOMAIN_AUX_E)) 2877 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ 2878 BIT_ULL(POWER_DOMAIN_AUX_F)) 2879 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ 2880 BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) 2881 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ 2882 BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) 2883 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ 2884 BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) 2885 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ 2886 BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) 
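
/*
 * A sketch of how the *_POWER_DOMAINS masks above are consumed (illustrative
 * only, using helpers defined earlier in this file): each power well
 * advertises the domains it serves, and a get on a domain walks every well
 * whose mask contains that domain:
 *
 *	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
 *		intel_power_well_get(dev_priv, power_well);
 *
 * so a domain listed in several wells keeps all of them referenced.
 */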
2887 2888 #define TGL_PW_5_POWER_DOMAINS ( \ 2889 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 2890 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 2891 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 2892 BIT_ULL(POWER_DOMAIN_INIT)) 2893 2894 #define TGL_PW_4_POWER_DOMAINS ( \ 2895 TGL_PW_5_POWER_DOMAINS | \ 2896 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2897 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2898 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2899 BIT_ULL(POWER_DOMAIN_INIT)) 2900 2901 #define TGL_PW_3_POWER_DOMAINS ( \ 2902 TGL_PW_4_POWER_DOMAINS | \ 2903 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2904 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2905 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2906 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 2907 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 2908 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ 2909 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ 2910 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \ 2911 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \ 2912 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 2913 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 2914 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 2915 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 2916 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ 2917 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ 2918 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 2919 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 2920 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 2921 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 2922 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ 2923 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ 2924 BIT_ULL(POWER_DOMAIN_VGA) | \ 2925 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2926 BIT_ULL(POWER_DOMAIN_INIT)) 2927 2928 #define TGL_PW_2_POWER_DOMAINS ( \ 2929 TGL_PW_3_POWER_DOMAINS | \ 2930 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2931 BIT_ULL(POWER_DOMAIN_INIT)) 2932 2933 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2934 TGL_PW_3_POWER_DOMAINS | \ 2935 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2936 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2937 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2938 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2939 BIT_ULL(POWER_DOMAIN_INIT)) 2940 2941 #define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) 2942 #define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) 2943 #define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) 2944 #define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) 2945 #define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5) 2946 #define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6) 2947 2948 #define TGL_AUX_A_IO_POWER_DOMAINS ( \ 2949 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2950 BIT_ULL(POWER_DOMAIN_AUX_A)) 2951 #define TGL_AUX_B_IO_POWER_DOMAINS ( \ 2952 BIT_ULL(POWER_DOMAIN_AUX_B)) 2953 #define TGL_AUX_C_IO_POWER_DOMAINS ( \ 2954 BIT_ULL(POWER_DOMAIN_AUX_C)) 2955 2956 #define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) 2957 #define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) 2958 #define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) 2959 #define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) 2960 #define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5) 2961 #define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6) 2962 2963 #define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) 2964 #define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) 2965 #define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) 2966 #define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) 2967 #define TGL_AUX_IO_TBT5_POWER_DOMAINS 
BIT_ULL(POWER_DOMAIN_AUX_TBT5) 2968 #define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6) 2969 2970 #define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ 2971 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 2972 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 2973 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 2974 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 2975 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ 2976 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ 2977 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 2978 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 2979 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 2980 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 2981 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ 2982 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ 2983 BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) 2984 2985 #define RKL_PW_4_POWER_DOMAINS ( \ 2986 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2987 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2988 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2989 BIT_ULL(POWER_DOMAIN_INIT)) 2990 2991 #define RKL_PW_3_POWER_DOMAINS ( \ 2992 RKL_PW_4_POWER_DOMAINS | \ 2993 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2994 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2995 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2996 BIT_ULL(POWER_DOMAIN_VGA) | \ 2997 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2998 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 2999 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 3000 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3001 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3002 BIT_ULL(POWER_DOMAIN_INIT)) 3003 3004 /* 3005 * There is no PW_2/PG_2 on RKL. 3006 * 3007 * RKL PW_1/PG_1 domains (under HW/DMC control): 3008 * - DBUF function (note: registers are in PW0) 3009 * - PIPE_A and its planes and VDSC/joining, except VGA 3010 * - transcoder A 3011 * - DDI_A and DDI_B 3012 * - FBC 3013 * 3014 * RKL PW_0/PG_0 domains (under HW/DMC control): 3015 * - PCI 3016 * - clocks except port PLL 3017 * - shared functions: 3018 * * interrupts except pipe interrupts 3019 * * MBus except PIPE_MBUS_DBOX_CTL 3020 * * DBUF registers 3021 * - central power except FBC 3022 * - top-level GTC (DDI-level GTC is in the well associated with the DDI) 3023 */ 3024 3025 #define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3026 RKL_PW_3_POWER_DOMAINS | \ 3027 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3028 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3029 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3030 BIT_ULL(POWER_DOMAIN_INIT)) 3031 3032 /* 3033 * XE_LPD Power Domains 3034 * 3035 * Previous platforms required that PG(n-1) be enabled before PG(n). That 3036 * dependency chain turns into a dependency tree on XE_LPD: 3037 * 3038 * PG0 3039 * | 3040 * --PG1-- 3041 * / \ 3042 * PGA --PG2-- 3043 * / | \ 3044 * PGB PGC PGD 3045 * 3046 * Power wells must be enabled from top to bottom and disabled from bottom 3047 * to top. This allows pipes to be power gated independently. 
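 *
 * For example, enabling pipe C requires PG1 and PG2 to be up first (PG0 is
 * under HW/DMC control), while pipe A only needs PG1 and PGA, so pipe A can
 * stay up while PG2 and everything below it is powered down.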
 */

#define XELPD_PW_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_D) | \
	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define XELPD_PW_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define XELPD_PW_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define XELPD_PW_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define XELPD_PW_2_POWER_DOMAINS ( \
	XELPD_PW_B_POWER_DOMAINS | \
	XELPD_PW_C_POWER_DOMAINS | \
	XELPD_PW_D_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \
	BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \
	BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
	BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
	BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
	BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
	BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
	BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
	BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
	BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
	BIT_ULL(POWER_DOMAIN_INIT))

/*
 * XELPD PW_1/PG_1 domains (under HW/DMC control):
 *  - DBUF function (registers are in PW0)
 *  - Transcoder A
 *  - DDI_A and DDI_B
 *
 * XELPD PW_0/PG_0 domains (under HW/DMC control):
 *  - PCI
 *  - Clocks except port PLL
 *  - Shared functions:
 *    * interrupts except pipe interrupts
 *    * MBus except PIPE_MBUS_DBOX_CTL
 *    * DBUF registers
 *  - Central power except FBC
 *  - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
 */

#define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	XELPD_PW_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_D_XELPD)
#define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_E_XELPD)
#define XELPD_AUX_IO_USBC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC1)
#define XELPD_AUX_IO_USBC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC2)
#define XELPD_AUX_IO_USBC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC3)
#define XELPD_AUX_IO_USBC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC4)

#define XELPD_AUX_IO_TBT1_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT1)
#define XELPD_AUX_IO_TBT2_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT2)
#define XELPD_AUX_IO_TBT3_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT3)
#define XELPD_AUX_IO_TBT4_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT4)

#define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD)
#define
XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD) 3137 #define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) 3138 #define XELPD_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) 3139 #define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) 3140 #define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) 3141 3142 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 3143 .sync_hw = i9xx_power_well_sync_hw_noop, 3144 .enable = i9xx_always_on_power_well_noop, 3145 .disable = i9xx_always_on_power_well_noop, 3146 .is_enabled = i9xx_always_on_power_well_enabled, 3147 }; 3148 3149 static const struct i915_power_well_ops chv_pipe_power_well_ops = { 3150 .sync_hw = chv_pipe_power_well_sync_hw, 3151 .enable = chv_pipe_power_well_enable, 3152 .disable = chv_pipe_power_well_disable, 3153 .is_enabled = chv_pipe_power_well_enabled, 3154 }; 3155 3156 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { 3157 .sync_hw = i9xx_power_well_sync_hw_noop, 3158 .enable = chv_dpio_cmn_power_well_enable, 3159 .disable = chv_dpio_cmn_power_well_disable, 3160 .is_enabled = vlv_power_well_enabled, 3161 }; 3162 3163 static const struct i915_power_well_desc i9xx_always_on_power_well[] = { 3164 { 3165 .name = "always-on", 3166 .always_on = true, 3167 .domains = POWER_DOMAIN_MASK, 3168 .ops = &i9xx_always_on_power_well_ops, 3169 .id = DISP_PW_ID_NONE, 3170 }, 3171 }; 3172 3173 static const struct i915_power_well_ops i830_pipes_power_well_ops = { 3174 .sync_hw = i830_pipes_power_well_sync_hw, 3175 .enable = i830_pipes_power_well_enable, 3176 .disable = i830_pipes_power_well_disable, 3177 .is_enabled = i830_pipes_power_well_enabled, 3178 }; 3179 3180 static const struct i915_power_well_desc i830_power_wells[] = { 3181 { 3182 .name = "always-on", 3183 .always_on = true, 3184 .domains = POWER_DOMAIN_MASK, 3185 .ops = &i9xx_always_on_power_well_ops, 3186 .id = DISP_PW_ID_NONE, 3187 }, 3188 { 3189 .name = "pipes", 3190 .domains = I830_PIPES_POWER_DOMAINS, 3191 .ops = &i830_pipes_power_well_ops, 3192 .id = DISP_PW_ID_NONE, 3193 }, 3194 }; 3195 3196 static const struct i915_power_well_ops hsw_power_well_ops = { 3197 .sync_hw = hsw_power_well_sync_hw, 3198 .enable = hsw_power_well_enable, 3199 .disable = hsw_power_well_disable, 3200 .is_enabled = hsw_power_well_enabled, 3201 }; 3202 3203 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { 3204 .sync_hw = i9xx_power_well_sync_hw_noop, 3205 .enable = gen9_dc_off_power_well_enable, 3206 .disable = gen9_dc_off_power_well_disable, 3207 .is_enabled = gen9_dc_off_power_well_enabled, 3208 }; 3209 3210 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { 3211 .sync_hw = i9xx_power_well_sync_hw_noop, 3212 .enable = bxt_dpio_cmn_power_well_enable, 3213 .disable = bxt_dpio_cmn_power_well_disable, 3214 .is_enabled = bxt_dpio_cmn_power_well_enabled, 3215 }; 3216 3217 static const struct i915_power_well_regs hsw_power_well_regs = { 3218 .bios = HSW_PWR_WELL_CTL1, 3219 .driver = HSW_PWR_WELL_CTL2, 3220 .kvmr = HSW_PWR_WELL_CTL3, 3221 .debug = HSW_PWR_WELL_CTL4, 3222 }; 3223 3224 static const struct i915_power_well_desc hsw_power_wells[] = { 3225 { 3226 .name = "always-on", 3227 .always_on = true, 3228 .domains = POWER_DOMAIN_MASK, 3229 .ops = &i9xx_always_on_power_well_ops, 3230 .id = DISP_PW_ID_NONE, 3231 }, 3232 { 3233 .name = "display", 3234 .domains = HSW_DISPLAY_POWER_DOMAINS, 3235 .ops = &hsw_power_well_ops, 3236 
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};

static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
};

static const struct i915_power_well_desc bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};

static const struct i915_power_well_desc glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DISP_PW_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
};

static const struct i915_power_well_desc cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_DDI_F_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_DDI_F_AUX,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
		},
	},
};

static const struct i915_power_well_ops icl_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_aux_power_well_enable,
	.disable = icl_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_AUX1,
	.driver	= ICL_PWR_WELL_CTL_AUX2,
	.debug	= ICL_PWR_WELL_CTL_AUX4,
};

static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_DDI1,
	.driver	= ICL_PWR_WELL_CTL_DDI2,
	.debug	= ICL_PWR_WELL_CTL_DDI4,
};

static const struct i915_power_well_desc icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
		},
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C TC1",
		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX D TC2",
		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E TC3",
		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F TC4",
		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX C TBT1",
		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX D TBT2",
		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX E TBT3",
		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX F TBT4",
		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};

static void
tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
{
	u8 tries = 0;
	int ret;

	while (1) {
		u32 low_val;
		u32 high_val = 0;

		if (block)
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
		else
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;

		/*
		 * Spec states that we should time out the request after
		 * 200us, but the function below will time out after 500us.
		 */
		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
					     &high_val);
		if (ret == 0) {
			if (block &&
			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
				ret = -EIO;
			else
				break;
		}

		if (++tries == 3)
			break;

		msleep(1);
	}

	if (ret)
		drm_err(&i915->drm, "TC cold %sblock failed\n",
			block ? "" : "un");
	else
		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
			    block ? "" : "un");
}

static void
tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
				  struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, true);
}

static void
tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
				   struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, false);
}

static void
tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
				   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		tgl_tc_cold_off_power_well_enable(i915, power_well);
	else
		tgl_tc_cold_off_power_well_disable(i915, power_well);
}

static bool
tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	/*
	 * Not the correct implementation, but there is no way to just read
	 * the state back from PCODE, so return the usage count to avoid
	 * state mismatch errors.
	 */
	return power_well->count;
}

static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
	.enable = tgl_tc_cold_off_power_well_enable,
	.disable = tgl_tc_cold_off_power_well_disable,
	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};

static const struct i915_power_well_desc tgl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = TGL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = TGL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		}
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		}
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		}
	},
	{
		.name = "DDI IO TC1",
		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
		},
	},
	{
		.name = "DDI IO TC2",
		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
		},
	},
	{
		.name = "DDI IO TC3",
		.domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
		},
	},
	{
		.name = "DDI IO TC4",
		.domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
		},
	},
	{
		.name = "DDI IO TC5",
		.domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
		},
	},
	{
		.name = "DDI IO TC6",
		.domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
		},
	},
	{
		.name = "TC cold off",
		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
		.ops = &tgl_tc_cold_off_ops,
		.id = TGL_DISP_PW_TC_COLD_OFF,
	},
	{
		.name = "AUX A",
		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX USBC1",
		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX USBC2",
		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX USBC3",
		.domains = TGL_AUX_IO_USBC3_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX USBC4",
		.domains = TGL_AUX_IO_USBC4_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX USBC5",
		.domains = TGL_AUX_IO_USBC5_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX USBC6",
		.domains = TGL_AUX_IO_USBC6_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX TBT1",
		.domains = TGL_AUX_IO_TBT1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT2",
		.domains = TGL_AUX_IO_TBT2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT3",
		.domains = TGL_AUX_IO_TBT3_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT4",
		.domains = TGL_AUX_IO_TBT4_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT5",
		.domains = TGL_AUX_IO_TBT5_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT6",
		.domains = TGL_AUX_IO_TBT6_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = TGL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		}
	},
	{
		.name = "power well 5",
		.domains = TGL_PW_5_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_D),
		},
	},
};

static const struct i915_power_well_desc rkl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 3",
		.domains = RKL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 4",
		.domains = RKL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		}
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		}
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		}
	},
	{
		.name = "DDI IO TC1",
		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
		},
	},
	{
		.name = "DDI IO TC2",
		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX USBC1",
		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
		},
	},
	{
		.name = "AUX USBC2",
		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
		},
	},
};

static const struct i915_power_well_desc xelpd_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = XELPD_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well A",
		.domains = XELPD_PW_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_PW_A,
			.hsw.irq_pipe_mask = BIT(PIPE_A),
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well B",
		.domains = XELPD_PW_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_PW_B,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well C",
		.domains = XELPD_PW_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_PW_C,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well D",
		.domains = XELPD_PW_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_PW_D,
			.hsw.irq_pipe_mask = BIT(PIPE_D),
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		}
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		}
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		}
	},
	{
		.name = "DDI IO D_XELPD",
		.domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_DDI_D,
		}
	},
	{
		.name = "DDI IO E_XELPD",
		.domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_DDI_E,
		}
	},
	{
		.name = "DDI IO TC1",
		.domains = XELPD_DDI_IO_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
		}
	},
	{
		.name = "DDI IO TC2",
		.domains = XELPD_DDI_IO_TC2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
		}
	},
	{
		.name = "DDI IO TC3",
		.domains = XELPD_DDI_IO_TC3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
		}
	},
	{
		.name = "DDI IO TC4",
		.domains = XELPD_DDI_IO_TC4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
		}
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D_XELPD",
		.domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "AUX E_XELPD",
		.domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_AUX_E,
		},
	},
	{
		.name = "AUX USBC1",
		.domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
		},
	},
	{
		.name = "AUX USBC2",
		.domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
		},
	},
	{
		.name = "AUX USBC3",
		.domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
		},
	},
	{
		.name = "AUX USBC4",
		.domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
		},
	},
	{
		.name = "AUX TBT1",
		.domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT2",
		.domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT3",
		.domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT4",
		.domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (!HAS_DISPLAY(dev_priv))
		return 0;
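
	/*
	 * max_dc is the deepest DC state the platform supports; the
	 * requested_dc switch below translates it into an enable mask
	 * (e.g. max_dc == 4 allows DC3CO on top of DC6).
	 */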
	if (IS_DG1(dev_priv))
		max_dc = 3;
	else if (DISPLAY_VER(dev_priv) >= 12)
		max_dc = 4;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		max_dc = 1;
	else if (DISPLAY_VER(dev_priv) >= 9)
		max_dc = 2;
	else
		max_dc = 0;

	/*
	 * DC9 has a separate HW flow from the rest of the DC states,
	 * not depending on the DMC firmware. It's needed by system
	 * suspend/resume, so allow it unconditionally.
	 */
	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
	       DISPLAY_VER(dev_priv) >= 11 ?
	       DC_STATE_EN_DC9 : 0;

	if (!dev_priv->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}

static int
__set_power_wells(struct i915_power_domains *power_domains,
		  const struct i915_power_well_desc *power_well_descs,
		  int power_well_descs_sz, u64 skip_mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	u64 power_well_ids = 0;
	int power_well_count = 0;
	int i, plt_idx = 0;

	for (i = 0; i < power_well_descs_sz; i++)
		if (!(BIT_ULL(power_well_descs[i].id) & skip_mask))
			power_well_count++;

	power_domains->power_well_count = power_well_count;
	power_domains->power_wells =
			kcalloc(power_well_count,
				sizeof(*power_domains->power_wells),
				GFP_KERNEL);
	if (!power_domains->power_wells)
		return -ENOMEM;

	for (i = 0; i < power_well_descs_sz; i++) {
		enum i915_power_well_id id = power_well_descs[i].id;

		if (BIT_ULL(id) & skip_mask)
			continue;

		power_domains->power_wells[plt_idx++].desc =
			&power_well_descs[i];

		if (id == DISP_PW_ID_NONE)
			continue;

		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
		power_well_ids |= BIT_ULL(id);
	}

	return 0;
}

#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs), skip_mask)

#define set_power_wells(power_domains, __power_well_descs) \
	set_power_wells_mask(power_domains, __power_well_descs, 0)

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
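 *
 * Returns:
 * -ENOMEM if allocating the power well array fails, 0 otherwise.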
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int err;

	dev_priv->params.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   dev_priv->params.disable_power_well);
	dev_priv->dmc.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

	dev_priv->dmc.target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (!HAS_DISPLAY(dev_priv)) {
		power_domains->power_well_count = 0;
		err = 0;
	} else if (DISPLAY_VER(dev_priv) >= 13) {
		err = set_power_wells(power_domains, xelpd_power_wells);
	} else if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) {
		err = set_power_wells_mask(power_domains, tgl_power_wells,
					   BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		err = set_power_wells(power_domains, rkl_power_wells);
	} else if (DISPLAY_VER(dev_priv) == 12) {
		err = set_power_wells(power_domains, tgl_power_wells);
	} else if (DISPLAY_VER(dev_priv) == 11) {
		err = set_power_wells(power_domains, icl_power_wells);
	} else if (IS_CNL_WITH_PORT_F(dev_priv)) {
		err = set_power_wells(power_domains, cnl_power_wells);
	} else if (IS_CANNONLAKE(dev_priv)) {
		err = set_power_wells_mask(power_domains, cnl_power_wells,
					   BIT_ULL(CNL_DISP_PW_DDI_F_IO) |
					   BIT_ULL(CNL_DISP_PW_DDI_F_AUX));
	} else if (IS_GEMINILAKE(dev_priv)) {
		err = set_power_wells(power_domains, glk_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		err = set_power_wells(power_domains, bxt_power_wells);
	} else if (DISPLAY_VER(dev_priv) == 9) {
		err = set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, chv_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		err = set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_HASWELL(dev_priv)) {
		err = set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		err = set_power_wells(power_domains, i830_power_wells);
	} else {
		err = set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return err;
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	kfree(dev_priv->power_domains.power_wells);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->desc->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled =
			power_well->desc->ops->is_enabled(dev_priv, power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, enabledisable(enable));
}

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * This might run in parallel with gen9_dc_off_power_well_enable()
	 * being called from intel_dp_detect(), for instance. Without the
	 * lock, gen9_assert_dbuf_enabled() could preempt us after the
	 * registers were already updated but before dev_priv was, and
	 * trigger its assertion on the resulting mismatch.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
	 * Just power up at least 1 slice, we will
	 * figure out later which slices we have and what we need.
	 */
	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
				dev_priv->dbuf.enabled_slices);
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	gen9_dbuf_slices_update(dev_priv, 0);
}

static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
	enum dbuf_slice slice;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	for_each_dbuf_slice(dev_priv, slice)
		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
			     DBUF_TRACKER_STATE_SERVICE_MASK,
			     DBUF_TRACKER_STATE_SERVICE(8));
}

static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
	u32 mask, val, i;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
		MBUS_ABOX_B_CREDIT_MASK |
		MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
		MBUS_ABOX_BT_CREDIT_POOL2(16) |
		MBUS_ABOX_B_CREDIT(1) |
		MBUS_ABOX_BW_CREDIT(1);

	/*
	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
	 * expect us to program the abox_ctl0 register as well, even though
	 * we don't have to program other instance-0 registers like BW_BUDDY.
5288 */ 5289 if (DISPLAY_VER(dev_priv) == 12) 5290 abox_regs |= BIT(0); 5291 5292 for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) 5293 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val); 5294 } 5295 5296 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 5297 { 5298 u32 val = intel_de_read(dev_priv, LCPLL_CTL); 5299 5300 /* 5301 * The LCPLL register should be turned on by the BIOS. For now 5302 * let's just check its state and print errors in case 5303 * something is wrong. Don't even try to turn it on. 5304 */ 5305 5306 if (val & LCPLL_CD_SOURCE_FCLK) 5307 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); 5308 5309 if (val & LCPLL_PLL_DISABLE) 5310 drm_err(&dev_priv->drm, "LCPLL is disabled\n"); 5311 5312 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 5313 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); 5314 } 5315 5316 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 5317 { 5318 struct drm_device *dev = &dev_priv->drm; 5319 struct intel_crtc *crtc; 5320 5321 for_each_intel_crtc(dev, crtc) 5322 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 5323 pipe_name(crtc->pipe)); 5324 5325 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), 5326 "Display power well on\n"); 5327 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, 5328 "SPLL enabled\n"); 5329 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 5330 "WRPLL1 enabled\n"); 5331 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 5332 "WRPLL2 enabled\n"); 5333 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, 5334 "Panel power on\n"); 5335 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 5336 "CPU PWM1 enabled\n"); 5337 if (IS_HASWELL(dev_priv)) 5338 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 5339 "CPU PWM2 enabled\n"); 5340 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 5341 "PCH PWM1 enabled\n"); 5342 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 5343 "Utility pin enabled\n"); 5344 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, 5345 "PCH GTC enabled\n"); 5346 5347 /* 5348 * In theory we can still leave IRQs enabled, as long as only the HPD 5349 * interrupts remain enabled. We used to check for that, but since it's 5350 * gen-specific and since we only disable LCPLL after we fully disable 5351 * the interrupts, the check below should be enough. 5352 */ 5353 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 5354 } 5355 5356 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 5357 { 5358 if (IS_HASWELL(dev_priv)) 5359 return intel_de_read(dev_priv, D_COMP_HSW); 5360 else 5361 return intel_de_read(dev_priv, D_COMP_BDW); 5362 } 5363 5364 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 5365 { 5366 if (IS_HASWELL(dev_priv)) { 5367 if (sandybridge_pcode_write(dev_priv, 5368 GEN6_PCODE_WRITE_D_COMP, val)) 5369 drm_dbg_kms(&dev_priv->drm, 5370 "Failed to write to D_COMP\n"); 5371 } else { 5372 intel_de_write(dev_priv, D_COMP_BDW, val); 5373 intel_de_posting_read(dev_priv, D_COMP_BDW); 5374 } 5375 } 5376 5377 /* 5378 * This function implements pieces of two sequences from BSpec: 5379 * - Sequence for display software to disable LCPLL 5380 * - Sequence for display software to allow package C8+ 5381 * The steps implemented here are just the steps that actually touch the LCPLL 5382 * register. 
Callers should take care of disabling all the display engine 5383 * functions, doing the mode unset, fixing interrupts, etc. 5384 */ 5385 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 5386 bool switch_to_fclk, bool allow_power_down) 5387 { 5388 u32 val; 5389 5390 assert_can_disable_lcpll(dev_priv); 5391 5392 val = intel_de_read(dev_priv, LCPLL_CTL); 5393 5394 if (switch_to_fclk) { 5395 val |= LCPLL_CD_SOURCE_FCLK; 5396 intel_de_write(dev_priv, LCPLL_CTL, val); 5397 5398 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & 5399 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 5400 drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); 5401 5402 val = intel_de_read(dev_priv, LCPLL_CTL); 5403 } 5404 5405 val |= LCPLL_PLL_DISABLE; 5406 intel_de_write(dev_priv, LCPLL_CTL, val); 5407 intel_de_posting_read(dev_priv, LCPLL_CTL); 5408 5409 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 5410 drm_err(&dev_priv->drm, "LCPLL still locked\n"); 5411 5412 val = hsw_read_dcomp(dev_priv); 5413 val |= D_COMP_COMP_DISABLE; 5414 hsw_write_dcomp(dev_priv, val); 5415 ndelay(100); 5416 5417 if (wait_for((hsw_read_dcomp(dev_priv) & 5418 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 5419 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); 5420 5421 if (allow_power_down) { 5422 val = intel_de_read(dev_priv, LCPLL_CTL); 5423 val |= LCPLL_POWER_DOWN_ALLOW; 5424 intel_de_write(dev_priv, LCPLL_CTL, val); 5425 intel_de_posting_read(dev_priv, LCPLL_CTL); 5426 } 5427 } 5428 5429 /* 5430 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 5431 * source. 5432 */ 5433 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 5434 { 5435 u32 val; 5436 5437 val = intel_de_read(dev_priv, LCPLL_CTL); 5438 5439 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 5440 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 5441 return; 5442 5443 /* 5444 * Make sure we're not on PC8 state before disabling PC8, otherwise 5445 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 5446 */ 5447 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 5448 5449 if (val & LCPLL_POWER_DOWN_ALLOW) { 5450 val &= ~LCPLL_POWER_DOWN_ALLOW; 5451 intel_de_write(dev_priv, LCPLL_CTL, val); 5452 intel_de_posting_read(dev_priv, LCPLL_CTL); 5453 } 5454 5455 val = hsw_read_dcomp(dev_priv); 5456 val |= D_COMP_COMP_FORCE; 5457 val &= ~D_COMP_COMP_DISABLE; 5458 hsw_write_dcomp(dev_priv, val); 5459 5460 val = intel_de_read(dev_priv, LCPLL_CTL); 5461 val &= ~LCPLL_PLL_DISABLE; 5462 intel_de_write(dev_priv, LCPLL_CTL, val); 5463 5464 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 5465 drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); 5466 5467 if (val & LCPLL_CD_SOURCE_FCLK) { 5468 val = intel_de_read(dev_priv, LCPLL_CTL); 5469 val &= ~LCPLL_CD_SOURCE_FCLK; 5470 intel_de_write(dev_priv, LCPLL_CTL, val); 5471 5472 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & 5473 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 5474 drm_err(&dev_priv->drm, 5475 "Switching back to LCPLL failed\n"); 5476 } 5477 5478 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 5479 5480 intel_update_cdclk(dev_priv); 5481 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); 5482 } 5483 5484 /* 5485 * Package states C8 and deeper are really deep PC states that can only be 5486 * reached when all the devices on the system allow it, so even if the graphics 5487 * device allows PC8+, it doesn't mean the system will actually get to these 5488 * states. 
Our driver only allows PC8+ when going into runtime PM. 5489 * 5490 * The requirements for PC8+ are that all the outputs are disabled, the power 5491 * well is disabled and most interrupts are disabled, and these are also 5492 * requirements for runtime PM. When these conditions are met, we manually do 5493 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 5494 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard 5495 * hang the machine. 5496 * 5497 * When we really reach PC8 or deeper states (not just when we allow it) we lose 5498 * the state of some registers, so when we come back from PC8+ we need to 5499 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 5500 * need to take care of the registers kept by RC6. Notice that this happens even 5501 * if we don't put the device in PCI D3 state (which is what currently happens 5502 * because of the runtime PM support). 5503 * 5504 * For more, read "Display Sequences for Package C8" on the hardware 5505 * documentation. 5506 */ 5507 static void hsw_enable_pc8(struct drm_i915_private *dev_priv) 5508 { 5509 u32 val; 5510 5511 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n"); 5512 5513 if (HAS_PCH_LPT_LP(dev_priv)) { 5514 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 5515 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 5516 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 5517 } 5518 5519 lpt_disable_clkout_dp(dev_priv); 5520 hsw_disable_lcpll(dev_priv, true, true); 5521 } 5522 5523 static void hsw_disable_pc8(struct drm_i915_private *dev_priv) 5524 { 5525 u32 val; 5526 5527 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); 5528 5529 hsw_restore_lcpll(dev_priv); 5530 intel_init_pch_refclk(dev_priv); 5531 5532 if (HAS_PCH_LPT_LP(dev_priv)) { 5533 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 5534 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 5535 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 5536 } 5537 } 5538 5539 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, 5540 bool enable) 5541 { 5542 i915_reg_t reg; 5543 u32 reset_bits, val; 5544 5545 if (IS_IVYBRIDGE(dev_priv)) { 5546 reg = GEN7_MSG_CTL; 5547 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; 5548 } else { 5549 reg = HSW_NDE_RSTWRN_OPT; 5550 reset_bits = RESET_PCH_HANDSHAKE_ENABLE; 5551 } 5552 5553 val = intel_de_read(dev_priv, reg); 5554 5555 if (enable) 5556 val |= reset_bits; 5557 else 5558 val &= ~reset_bits; 5559 5560 intel_de_write(dev_priv, reg, val); 5561 } 5562 5563 static void skl_display_core_init(struct drm_i915_private *dev_priv, 5564 bool resume) 5565 { 5566 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5567 struct i915_power_well *well; 5568 5569 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5570 5571 /* enable PCH reset handshake */ 5572 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 5573 5574 if (!HAS_DISPLAY(dev_priv)) 5575 return; 5576 5577 /* enable PG1 and Misc I/O */ 5578 mutex_lock(&power_domains->lock); 5579 5580 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5581 intel_power_well_enable(dev_priv, well); 5582 5583 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); 5584 intel_power_well_enable(dev_priv, well); 5585 5586 mutex_unlock(&power_domains->lock); 5587 5588 intel_cdclk_init_hw(dev_priv); 5589 5590 gen9_dbuf_enable(dev_priv); 5591 5592 if (resume && intel_dmc_has_payload(dev_priv)) 5593 intel_dmc_load_program(dev_priv); 5594 } 5595 5596 static void skl_display_core_uninit(struct 

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to the initialization sequence;
	 * previously it was left up to the BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);
}

static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2-3. */
	intel_combo_phy_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 * The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	intel_cdclk_init_hw(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);
}

static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 * The AUX IO power wells are toggled on demand, so they are already
	 * disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}

struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};
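
/*
 * Worked example (illustrative): on a two-channel DDR4 machine,
 * tgl_bw_buddy_init() below scans tgl_buddy_page_masks[] until it finds
 * num_channels == 2 && type == INTEL_DRAM_DDR4 and programs
 * BW_BUDDY_PAGE_MASK with 0x1F. A configuration with no match runs into
 * the zero sentinel entry and disables the buddy logic instead.
 */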

static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
	int config, i;

	if (IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
		/* Wa_1409767108:tgl,dg1,adl-s */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,rkl */
			intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
				     BW_BUDDY_TLB_REQ_TIMER_MASK,
				     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}

static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 * The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 12)
		gen12_dbuf_slices_config(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);

	/* Wa_14011508470 */
	if (DISPLAY_VER(dev_priv) == 12) {
		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
	}

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(dev_priv) >= 13)
		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}
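
/*
 * Note on the read-modify-write helper used above (illustration only):
 * intel_de_rmw(dev_priv, reg, clear, set) reads the register, clears the
 * "clear" bits, ORs in the "set" bits and writes the result back. E.g.
 *
 *	intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
 *		     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
 *
 * sets the clock-gating-disable bit while leaving all other bits intact.
 */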

static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 * The AUX IO power wells are toggled on demand, so they are already
	 * disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}
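
/*
 * Worked example for the reconstruction above (illustrative): per the
 * comment in chv_phy_control_init(), a channel with all lanes disabled
 * reads back mask == 0xf, so the override stays disabled with the
 * power-down bits cleared; a partially powered channel (say mask == 0x3)
 * enables the override and mirrors the live lane status via
 * PHY_CH_POWER_DOWN_OVRD(mask, ...).
 */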

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might already be active, skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}

static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains are referenced or disabled by intel_modeset_readout_hw_state().
 * After that the reference count of each power well must match its HW
 * enabled state, see intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_CANNONLAKE(i915)) {
		cnl_display_core_init(i915, resume);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (DISPLAY_VER(i915) == 9) {
		skl_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS-enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/*
	 * If the user disabled power well support, take a permanent
	 * reference to keep all power wells enabled.
	 */
	if (!i915->params.disable_power_well) {
		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
		i915->power_domains.disable_wakeref = intel_display_power_get(i915,
									      POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}

/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
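
/*
 * Illustrative driver load/unload ordering implied by the pairing rules in
 * the kernel-doc above and below (a sketch, not additional driver code):
 *
 *	intel_power_domains_init_hw(i915, false);
 *	...display HW state readout...
 *	intel_power_domains_enable(i915);
 *	...normal operation...
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_driver_remove(i915);
 */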

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the on-demand enabling/disabling of the display power wells. Note
 * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
 * toggled only at specific points of the display modeset sequence, thus they
 * are not affected by the intel_power_domains_enable()/disable() calls. The
 * purpose of this function is to keep the rest of the power wells enabled
 * until the end of display HW readout (which will acquire the power
 * references reflecting the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the on-demand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}
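
/*
 * Illustrative system suspend/resume ordering implied by the pairing rules
 * in the kernel-doc comments (sketch only):
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *	...system sleep...
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);
 */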

/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support, don't manually deinit the power domains. This also means
	 * the DMC firmware will stay active; it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    intel_dmc_has_payload(i915)) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	if (DISPLAY_VER(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_CANNONLAKE(i915))
		cnl_display_core_uninit(i915);
	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
		bxt_display_core_uninit(i915);
	else if (DISPLAY_VER(i915) == 9)
		skl_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}

/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
		power_domains->init_wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg(&i915->drm, "%-25s %d\n",
			power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			drm_dbg(&i915->drm, "  %-23s %d\n",
				intel_display_power_domain_str(domain),
				power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify that the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			drm_err(&i915->drm,
				"power well %s state mismatch (refcount %d/enabled %d)\n",
				power_well->desc->name,
				power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			drm_err(&i915->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				power_well->desc->name, power_well->count,
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif

void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		bxt_enable_dc9(i915);
		/* Tweaked Wa_14010685332:icp,jsp,mcc */
		if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
			intel_de_rmw(i915, SOUTH_CHICKEN1,
				     SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
		/* Tweaked Wa_14010685332:icp,jsp,mcc */
		if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
			intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);

	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}

void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}
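
/*
 * Sketch of the pairing around this point (illustrative):
 * intel_display_power_suspend() above tears the display core down and
 * enters DC9/PC8, and intel_display_power_resume() below undoes it,
 * re-arming DC5/DC6 when a DMC payload is loaded:
 *
 *	intel_display_power_suspend(i915);
 *	...S3/S4...
 *	intel_display_power_resume(i915);
 */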

void intel_display_power_resume(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		bxt_disable_dc9(i915);
		icl_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915)) {
			if (i915->dmc.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(i915);
			else if (i915->dmc.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(i915);
		}
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_disable_dc9(i915);
		bxt_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915) &&
		    (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}