/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "vlv_sideband.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_G_LANES:
		return "PORT_DDI_G_LANES";
	case POWER_DOMAIN_PORT_DDI_H_LANES:
		return "PORT_DDI_H_LANES";
	case POWER_DOMAIN_PORT_DDI_I_LANES:
		return "PORT_DDI_I_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_G_IO:
		return "PORT_DDI_G_IO";
	case POWER_DOMAIN_PORT_DDI_H_IO:
		return "PORT_DDI_H_IO";
	case POWER_DOMAIN_PORT_DDI_I_IO:
		return "PORT_DDI_I_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO_MMIO:
		return "AUDIO_MMIO";
	case POWER_DOMAIN_AUDIO_PLAYBACK:
		return "AUDIO_PLAYBACK";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_G:
		return "AUX_G";
	case POWER_DOMAIN_AUX_H:
		return "AUX_H";
	case POWER_DOMAIN_AUX_I:
		return "AUX_I";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_C_TBT:
		return "AUX_C_TBT";
	case POWER_DOMAIN_AUX_D_TBT:
		return "AUX_D_TBT";
	case POWER_DOMAIN_AUX_E_TBT:
		return "AUX_E_TBT";
	case POWER_DOMAIN_AUX_F_TBT:
		return "AUX_F_TBT";
	case POWER_DOMAIN_AUX_G_TBT:
		return "AUX_G_TBT";
	case POWER_DOMAIN_AUX_H_TBT:
		return "AUX_H_TBT";
	case POWER_DOMAIN_AUX_I_TBT:
		return "AUX_I_TBT";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DPLL_DC_OFF:
		return "DPLL_DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	drm_WARN(&dev_priv->drm, !power_well->count,
		 "Use count on power well %s is already zero",
		 power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}
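/*
 * Power wells are reference counted: the first get enables the well, the
 * last put disables it again. A minimal usage sketch, assuming the well
 * was looked up beforehand (e.g. via lookup_power_well()):
 *
 *	intel_power_well_get(dev_priv, power_well);
 *	... access the hardware behind the well ...
 *	intel_power_well_put(dev_priv, power_well);
 *
 * Most code should not use these helpers directly and instead go through
 * the domain based intel_display_power_get()/put() interfaces.
 */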
/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
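/*
 * For example, assuming the usual contiguous enum layout:
 * ICL_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_B) yields AUX_CH_B, and
 * ICL_TBT_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_TBT2) yields AUX_CH_D, since
 * the TBT AUX channels start at AUX_CH_C.
 */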
static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
						 ICL_AUX_PW_TO_CH(pw_idx);
}

static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
		       enum aux_ch aux_ch)
{
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);
		if (!dig_port)
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	return dig_port;
}

static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
				  const struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);

	return intel_port_to_phy(i915, dig_port->base.port);
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	int enable_delay = power_well->desc->hsw.fixed_enable_delay;

	/*
	 * For some power wells we're not supposed to watch the status bit for
	 * an ack, but rather just wait a fixed amount of time and then
	 * proceed. This is only used on DG2.
	 */
	if (IS_DG2(dev_priv) && enable_delay) {
		usleep_range(enable_delay, 2 * enable_delay);
		return;
	}

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
			    power_well->desc->name);

		drm_WARN_ON(&dev_priv->drm, !timeout_expected);
	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;

	return ret;
}
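/*
 * The value returned above encodes one requester per bit: bit 0 = BIOS,
 * bit 1 = driver, bit 2 = KVMR, bit 3 = DEBUG. For example, a return
 * value of 0x5 means the BIOS and KVMR request registers are both
 * holding the power well on.
 */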
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    power_well->desc->name,
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(dev_priv, pg);
	}

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (DISPLAY_VER(dev_priv) < 12) {
		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
			       val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
		       val & ~ICL_LANE_ENABLE_AUX);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);

	return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
		return;

	if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		return;

	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void icl_tc_cold_exit(struct drm_i915_private *i915)
{
	int ret, tries = 0;

	while (1) {
		ret = sandybridge_pcode_write_timeout(i915,
						      ICL_PCODE_EXIT_TCCOLD,
						      0, 250, 1);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		msleep(1);
	}

	/* Spec states that TC cold exit can take up to 1ms to complete */
	if (!ret)
		msleep(1);

	/* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
		    "succeeded");
}
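/*
 * A sketch of the TC AUX power well enable sequence implemented below:
 * select TBT vs. native IO mode in DP_AUX_CH_CTL, set the request bit,
 * on ICL kick the pcode TC cold exit handshake for legacy ports, then
 * wait for the well's ack. The ack timeout is expected (and not warned
 * about) when the TBT DP tunnel is down. On DISPLAY_VER >= 12 the Dekel
 * PHY microcontroller health is checked as a final step.
 */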
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
	bool timeout_expected;
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (is_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));

	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down, or when
	 * we need to enable AUX on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
	if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		icl_tc_cold_exit(dev_priv);

	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);

	if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
			       HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting TC uC health\n");
	}
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	hsw_power_well_disable(dev_priv, power_well);
}

static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
			  struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_enable(dev_priv,
							   power_well);
	else
		return hsw_power_well_enable(dev_priv, power_well);
}

static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well)
{
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_disable(dev_priv,
							    power_well);
	else
		return hsw_power_well_disable(dev_priv, power_well);
}
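/*
 * The two dispatchers above select the AUX power well sequence by PHY
 * type: TC PHYs take the TC specific path, ICL combo PHYs take the combo
 * PHY path with its workarounds, and everything else falls back to the
 * plain HSW style enable/disable.
 */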
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(dev_priv, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(dev_priv, regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(dev_priv, DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning the old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do {
		v = intel_de_read(dev_priv, DC_STATE_EN);

		if (v != state) {
			intel_de_write(dev_priv, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(&dev_priv->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the time we need one retry, avoid spam */
	if (rewrites > 1)
		drm_dbg_kms(&dev_priv->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (DISPLAY_VER(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
					  | DC_STATE_EN_DC9;
	else if (DISPLAY_VER(dev_priv) == 11)
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}
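/*
 * To illustrate the mask built above: DISPLAY_VER >= 12 covers
 * DC3CO/DC5/DC6/DC9, DISPLAY_VER == 11 covers DC5/DC6/DC9, BXT/GLK cover
 * DC5/DC9 (no DC6), and the remaining SKL-class platforms cover DC5/DC6.
 */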
static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_DISPLAY(dev_priv))
		return;

	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);

	drm_dbg_kms(&dev_priv->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    dev_priv->dmc.dc_state, val);
	dev_priv->dmc.dc_state = val;
}

/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * requesting it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     state & ~dev_priv->dmc.allowed_dc_mask))
		state &= dev_priv->dmc.allowed_dc_mask;

	val = intel_de_read(dev_priv, DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->dmc.dc_state)
		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			dev_priv->dmc.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->dmc.dc_state = val & mask;
}

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}
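/*
 * An example of the fallback chain implemented above: if DC6 is requested
 * but not in the allowed mask, the target degrades to DC5, then to DC3CO,
 * and finally to DC_STATE_DISABLE, which is always accepted.
 */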
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
	val = intel_de_read(dev_priv, DC_STATE_EN);
	val &= ~DC_STATE_DC3CO_STATUS;
	intel_de_write(dev_priv, DC_STATE_EN, val);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Delay of 200us DC3CO Exit time B.Spec 49196
	 */
	usleep_range(200, 210);
}

static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_pps_reset_all(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      !intel_de_read(dev_priv,
				     DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
		      "DMC program storage start is NULL\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
		      "DMC SSP Base Not fine\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
		      "DMC HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	drm_WARN(&dev_priv->drm, 1,
		 "Power well %d not defined for this platform\n",
		 power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state; based upon
 * this target_dc_state, the "DC off" power well will enable the desired
 * DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->dmc.target_dc_state)
		goto unlock;

	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
							   power_well);
	/*
	 * If the DC off power well is currently disabled, we need to enable
	 * and then disable it so the new target DC state takes effect.
	 */
	if (!dc_off_enabled)
		power_well->desc->ops->enable(dev_priv, power_well);

	dev_priv->dmc.target_dc_state = state;

	if (!dc_off_enabled)
		power_well->desc->ops->disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
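/*
 * A usage note (not enforced by the function above): one known caller is
 * the PSR code, which can switch the target to DC3CO while a DC3CO capable
 * transcoder is active and restore the deeper DC state afterwards.
 */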
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (DISPLAY_VER(dev_priv) == 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(&dev_priv->drm,
		      intel_display_power_well_is_enabled(dev_priv, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_dmc_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		      "Backlight is not disabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_dmc_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(dev_priv, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(dev_priv, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(dev_priv, regs->driver, drv_req | mask);
		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;

	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
}

static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = {};

	if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHYs' HW context (port B onwards) is lost after DC
		 * transitions, so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!intel_dmc_has_payload(dev_priv))
		return;

	switch (dev_priv->dmc.target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}
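/*
 * Note the inverted polarity of the "DC off" power well implemented by the
 * two helpers above: enabling the well disables all DC states
 * (gen9_disable_dc_states()), while disabling it re-arms the DMC to enter
 * the current target DC state.
 */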
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
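/*
 * A sketch of the punit handshake used by vlv_set_power_well() above: the
 * desired state is requested by updating the well's bits in
 * PUNIT_REG_PWRGT_CTRL, then PUNIT_REG_PWRGT_STATUS is polled (100 ms
 * timeout) until the status matches the request.
 */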
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	intel_de_write(dev_priv, DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);
	intel_hpd_poll_disable(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_pps_reset_all(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_enable(dev_priv);
}
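/*
 * Ordering note for the helpers below: the display power well is powered
 * on before vlv_display_power_well_init() touches the hardware, and
 * vlv_display_power_well_deinit() runs before the well is powered off
 * again.
 */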
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *	 be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		drm_err(&dev_priv->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET
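/*
 * A sketch of the CHV DPIO common lane enable sequence implemented below:
 * power the well on, poll DISPLAY_PHY_STATUS for the PHY power good
 * signal, program the dynamic power down bits via the DPIO sideband, and
 * finally deassert the common lane reset and re-check the PHY status.
 */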
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d is not powered up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existent CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
1774 */ 1775 if (ch == DPIO_CH1 && val == 0) 1776 expected = 0; 1777 } else if (mask != 0x0) { 1778 expected = DPIO_ANYDL_POWERDOWN; 1779 } else { 1780 expected = 0; 1781 } 1782 1783 if (ch == DPIO_CH0) 1784 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; 1785 else 1786 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; 1787 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1788 1789 drm_WARN(&dev_priv->drm, actual != expected, 1790 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", 1791 !!(actual & DPIO_ALLDL_POWERDOWN), 1792 !!(actual & DPIO_ANYDL_POWERDOWN), 1793 !!(expected & DPIO_ALLDL_POWERDOWN), 1794 !!(expected & DPIO_ANYDL_POWERDOWN), 1795 reg, val); 1796 } 1797 1798 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1799 enum dpio_channel ch, bool override) 1800 { 1801 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1802 bool was_override; 1803 1804 mutex_lock(&power_domains->lock); 1805 1806 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1807 1808 if (override == was_override) 1809 goto out; 1810 1811 if (override) 1812 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1813 else 1814 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1815 1816 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1817 dev_priv->chv_phy_control); 1818 1819 drm_dbg_kms(&dev_priv->drm, 1820 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", 1821 phy, ch, dev_priv->chv_phy_control); 1822 1823 assert_chv_phy_status(dev_priv); 1824 1825 out: 1826 mutex_unlock(&power_domains->lock); 1827 1828 return was_override; 1829 } 1830 1831 void chv_phy_powergate_lanes(struct intel_encoder *encoder, 1832 bool override, unsigned int mask) 1833 { 1834 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1835 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1836 enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); 1837 enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); 1838 1839 mutex_lock(&power_domains->lock); 1840 1841 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); 1842 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); 1843 1844 if (override) 1845 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1846 else 1847 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1848 1849 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1850 dev_priv->chv_phy_control); 1851 1852 drm_dbg_kms(&dev_priv->drm, 1853 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", 1854 phy, ch, mask, dev_priv->chv_phy_control); 1855 1856 assert_chv_phy_status(dev_priv); 1857 1858 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); 1859 1860 mutex_unlock(&power_domains->lock); 1861 } 1862 1863 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, 1864 struct i915_power_well *power_well) 1865 { 1866 enum pipe pipe = PIPE_A; 1867 bool enabled; 1868 u32 state, ctrl; 1869 1870 vlv_punit_get(dev_priv); 1871 1872 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); 1873 /* 1874 * We only ever set the power-on and power-gate states, anything 1875 * else is unexpected. 
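 * (The punit mirrors the DP_SSC_* control request into the DP_SSS_*
 * status bits 16 positions up once the request completes, which is
 * what the "ctrl << 16 != state" comparison below relies on.)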
1876 */ 1877 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && 1878 state != DP_SSS_PWR_GATE(pipe)); 1879 enabled = state == DP_SSS_PWR_ON(pipe); 1880 1881 /* 1882 * A transient state at this point would mean some unexpected party 1883 * is poking at the power controls too. 1884 */ 1885 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); 1886 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); 1887 1888 vlv_punit_put(dev_priv); 1889 1890 return enabled; 1891 } 1892 1893 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, 1894 struct i915_power_well *power_well, 1895 bool enable) 1896 { 1897 enum pipe pipe = PIPE_A; 1898 u32 state; 1899 u32 ctrl; 1900 1901 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); 1902 1903 vlv_punit_get(dev_priv); 1904 1905 #define COND \ 1906 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) 1907 1908 if (COND) 1909 goto out; 1910 1911 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 1912 ctrl &= ~DP_SSC_MASK(pipe); 1913 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); 1914 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); 1915 1916 if (wait_for(COND, 100)) 1917 drm_err(&dev_priv->drm, 1918 "timeout setting power well state %08x (%08x)\n", 1919 state, 1920 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); 1921 1922 #undef COND 1923 1924 out: 1925 vlv_punit_put(dev_priv); 1926 } 1927 1928 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, 1929 struct i915_power_well *power_well) 1930 { 1931 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1932 dev_priv->chv_phy_control); 1933 } 1934 1935 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, 1936 struct i915_power_well *power_well) 1937 { 1938 chv_set_pipe_power_well(dev_priv, power_well, true); 1939 1940 vlv_display_power_well_init(dev_priv); 1941 } 1942 1943 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, 1944 struct i915_power_well *power_well) 1945 { 1946 vlv_display_power_well_deinit(dev_priv); 1947 1948 chv_set_pipe_power_well(dev_priv, power_well, false); 1949 } 1950 1951 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) 1952 { 1953 return power_domains->async_put_domains[0] | 1954 power_domains->async_put_domains[1]; 1955 } 1956 1957 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 1958 1959 static bool 1960 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 1961 { 1962 struct drm_i915_private *i915 = container_of(power_domains, 1963 struct drm_i915_private, 1964 power_domains); 1965 return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] & 1966 power_domains->async_put_domains[1]); 1967 } 1968 1969 static bool 1970 __async_put_domains_state_ok(struct i915_power_domains *power_domains) 1971 { 1972 struct drm_i915_private *i915 = container_of(power_domains, 1973 struct drm_i915_private, 1974 power_domains); 1975 enum intel_display_power_domain domain; 1976 bool err = false; 1977 1978 err |= !assert_async_put_domain_masks_disjoint(power_domains); 1979 err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref != 1980 !!__async_put_domains_mask(power_domains)); 1981 1982 for_each_power_domain(domain, __async_put_domains_mask(power_domains)) 1983 err |= drm_WARN_ON(&i915->drm, 1984 power_domains->domain_use_count[domain] != 1); 1985 1986 return !err; 1987 } 1988 1989 static void print_power_domains(struct i915_power_domains *power_domains, 1990 const char *prefix, u64 
mask) 1991 { 1992 struct drm_i915_private *i915 = container_of(power_domains, 1993 struct drm_i915_private, 1994 power_domains); 1995 enum intel_display_power_domain domain; 1996 1997 drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask)); 1998 for_each_power_domain(domain, mask) 1999 drm_dbg(&i915->drm, "%s use_count %d\n", 2000 intel_display_power_domain_str(domain), 2001 power_domains->domain_use_count[domain]); 2002 } 2003 2004 static void 2005 print_async_put_domains_state(struct i915_power_domains *power_domains) 2006 { 2007 struct drm_i915_private *i915 = container_of(power_domains, 2008 struct drm_i915_private, 2009 power_domains); 2010 2011 drm_dbg(&i915->drm, "async_put_wakeref %u\n", 2012 power_domains->async_put_wakeref); 2013 2014 print_power_domains(power_domains, "async_put_domains[0]", 2015 power_domains->async_put_domains[0]); 2016 print_power_domains(power_domains, "async_put_domains[1]", 2017 power_domains->async_put_domains[1]); 2018 } 2019 2020 static void 2021 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2022 { 2023 if (!__async_put_domains_state_ok(power_domains)) 2024 print_async_put_domains_state(power_domains); 2025 } 2026 2027 #else 2028 2029 static void 2030 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 2031 { 2032 } 2033 2034 static void 2035 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2036 { 2037 } 2038 2039 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ 2040 2041 static u64 async_put_domains_mask(struct i915_power_domains *power_domains) 2042 { 2043 assert_async_put_domain_masks_disjoint(power_domains); 2044 2045 return __async_put_domains_mask(power_domains); 2046 } 2047 2048 static void 2049 async_put_domains_clear_domain(struct i915_power_domains *power_domains, 2050 enum intel_display_power_domain domain) 2051 { 2052 assert_async_put_domain_masks_disjoint(power_domains); 2053 2054 power_domains->async_put_domains[0] &= ~BIT_ULL(domain); 2055 power_domains->async_put_domains[1] &= ~BIT_ULL(domain); 2056 } 2057 2058 static bool 2059 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, 2060 enum intel_display_power_domain domain) 2061 { 2062 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2063 bool ret = false; 2064 2065 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) 2066 goto out_verify; 2067 2068 async_put_domains_clear_domain(power_domains, domain); 2069 2070 ret = true; 2071 2072 if (async_put_domains_mask(power_domains)) 2073 goto out_verify; 2074 2075 cancel_delayed_work(&power_domains->async_put_work); 2076 intel_runtime_pm_put_raw(&dev_priv->runtime_pm, 2077 fetch_and_zero(&power_domains->async_put_wakeref)); 2078 out_verify: 2079 verify_async_put_domains_state(power_domains); 2080 2081 return ret; 2082 } 2083 2084 static void 2085 __intel_display_power_get_domain(struct drm_i915_private *dev_priv, 2086 enum intel_display_power_domain domain) 2087 { 2088 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2089 struct i915_power_well *power_well; 2090 2091 if (intel_display_power_grab_async_put_ref(dev_priv, domain)) 2092 return; 2093 2094 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) 2095 intel_power_well_get(dev_priv, power_well); 2096 2097 power_domains->domain_use_count[domain]++; 2098 } 2099 2100 /** 2101 * intel_display_power_get - grab a power domain reference 2102 * @dev_priv: i915 device instance 2103 * @domain: power domain to reference 2104 * 2105 * 
This function grabs a power domain reference for @domain and ensures that the
2106 * power domain and all its parents are powered up. Therefore users should only
2107 * grab a reference to the innermost power domain they need.
2108 *
2109 * Any power domain reference obtained by this function must have a symmetric
2110 * call to intel_display_power_put() to release the reference again.
2111 */
2112 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
2113 enum intel_display_power_domain domain)
2114 {
2115 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2116 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2117
2118 mutex_lock(&power_domains->lock);
2119 __intel_display_power_get_domain(dev_priv, domain);
2120 mutex_unlock(&power_domains->lock);
2121
2122 return wakeref;
2123 }
2124
2125 /**
2126 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2127 * @dev_priv: i915 device instance
2128 * @domain: power domain to reference
2129 *
2130 * This function grabs a power domain reference for @domain only if the domain
2131 * is already enabled, keeping the power domain and all its parents powered up.
2132 * Otherwise no reference is taken and 0 is returned.
2133 *
2134 * Any power domain reference obtained by this function must have a symmetric
2135 * call to intel_display_power_put() to release the reference again.
2136 */
2137 intel_wakeref_t
2138 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2139 enum intel_display_power_domain domain)
2140 {
2141 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2142 intel_wakeref_t wakeref;
2143 bool is_enabled;
2144
2145 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2146 if (!wakeref)
2147 return 0;
2148
2149 mutex_lock(&power_domains->lock);
2150
2151 if (__intel_display_power_is_enabled(dev_priv, domain)) {
2152 __intel_display_power_get_domain(dev_priv, domain);
2153 is_enabled = true;
2154 } else {
2155 is_enabled = false;
2156 }
2157
2158 mutex_unlock(&power_domains->lock);
2159
2160 if (!is_enabled) {
2161 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2162 wakeref = 0;
2163 }
2164
2165 return wakeref;
2166 }
2167
2168 static void
2169 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2170 enum intel_display_power_domain domain)
2171 {
2172 struct i915_power_domains *power_domains;
2173 struct i915_power_well *power_well;
2174 const char *name = intel_display_power_domain_str(domain);
2175
2176 power_domains = &dev_priv->power_domains;
2177
2178 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2179 "Use count on domain %s is already zero\n",
2180 name);
2181 drm_WARN(&dev_priv->drm,
2182 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2183 "Async disabling of domain %s is pending\n",
2184 name);
2185
2186 power_domains->domain_use_count[domain]--;
2187
2188 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2189 intel_power_well_put(dev_priv, power_well);
2190 }
2191
2192 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2193 enum intel_display_power_domain domain)
2194 {
2195 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2196
2197 mutex_lock(&power_domains->lock);
2198 __intel_display_power_put_domain(dev_priv, domain);
2199 mutex_unlock(&power_domains->lock);
2200 }
2201
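/*
 * NOTE: a minimal usage sketch of the get/put API above (illustrative
 * only; the domain and the surrounding code are hypothetical):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access the PIPE_A hardware ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 *
 * For purely opportunistic accesses, intel_display_power_get_if_enabled()
 * avoids powering anything up:
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
 *	if (wakeref) {
 *		... read the HW state ...
 *		intel_display_power_put(dev_priv, domain, wakeref);
 *	}
 */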
2202 static void
2203 queue_async_put_domains_work(struct i915_power_domains *power_domains,
2204 intel_wakeref_t wakeref)
2205 {
2206 struct drm_i915_private *i915 = container_of(power_domains,
2207 struct drm_i915_private,
2208 power_domains);
2209 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2210 power_domains->async_put_wakeref = wakeref;
2211 drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
2212 &power_domains->async_put_work,
2213 msecs_to_jiffies(100)));
2214 }
2215
2216 static void
2217 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2218 {
2219 struct drm_i915_private *dev_priv =
2220 container_of(power_domains, struct drm_i915_private,
2221 power_domains);
2222 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2223 enum intel_display_power_domain domain;
2224 intel_wakeref_t wakeref;
2225
2226 /*
2227 * The caller must already hold a raw wakeref; upgrade it to a proper
2228 * wakeref to make the state checker happy about the HW access during
2229 * power well disabling.
2230 */
2231 assert_rpm_raw_wakeref_held(rpm);
2232 wakeref = intel_runtime_pm_get(rpm);
2233
2234 for_each_power_domain(domain, mask) {
2235 /* Clear before put, so put's sanity check is happy. */
2236 async_put_domains_clear_domain(power_domains, domain);
2237 __intel_display_power_put_domain(dev_priv, domain);
2238 }
2239
2240 intel_runtime_pm_put(rpm, wakeref);
2241 }
2242
2243 static void
2244 intel_display_power_put_async_work(struct work_struct *work)
2245 {
2246 struct drm_i915_private *dev_priv =
2247 container_of(work, struct drm_i915_private,
2248 power_domains.async_put_work.work);
2249 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2250 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2251 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2252 intel_wakeref_t old_work_wakeref = 0;
2253
2254 mutex_lock(&power_domains->lock);
2255
2256 /*
2257 * Bail out if all the domain refs pending to be released were grabbed
2258 * by subsequent gets or a flush_work.
2259 */
2260 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2261 if (!old_work_wakeref)
2262 goto out_verify;
2263
2264 release_async_put_domains(power_domains,
2265 power_domains->async_put_domains[0]);
2266
2267 /* Requeue the work if more domains were async put meanwhile. */
2268 if (power_domains->async_put_domains[1]) {
2269 power_domains->async_put_domains[0] =
2270 fetch_and_zero(&power_domains->async_put_domains[1]);
2271 queue_async_put_domains_work(power_domains,
2272 fetch_and_zero(&new_work_wakeref));
2273 } else {
2274 /*
2275 * Cancel the work that got queued after this one got dequeued,
2276 * since here we released the corresponding async-put reference.
2277 */
2278 cancel_delayed_work(&power_domains->async_put_work);
2279 }
2280
2281 out_verify:
2282 verify_async_put_domains_state(power_domains);
2283
2284 mutex_unlock(&power_domains->lock);
2285
2286 if (old_work_wakeref)
2287 intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2288 if (new_work_wakeref)
2289 intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2290 }
2291
2292 /**
2293 * intel_display_power_put_async - release a power domain reference asynchronously
2294 * @i915: i915 device instance
2295 * @domain: power domain to release the reference for
2296 * @wakeref: wakeref acquired for the reference that is being released
2297 *
2298 * This function drops the power domain reference obtained by
2299 * intel_display_power_get*() and schedules a work to power down the
2300 * corresponding hardware block if this is the last reference.
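 *
 * The power down is deferred by roughly 100 msec (see
 * queue_async_put_domains_work()), so a subsequent intel_display_power_get()
 * on the same domain can simply cancel it, avoiding a pointless off/on
 * cycle of the underlying power well. A usage sketch (illustrative only,
 * hypothetical caller):
 *
 *	wakeref = intel_display_power_get(i915, domain);
 *	... use the hardware ...
 *	intel_display_power_put_async(i915, domain, wakeref);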
2301 */ 2302 void __intel_display_power_put_async(struct drm_i915_private *i915, 2303 enum intel_display_power_domain domain, 2304 intel_wakeref_t wakeref) 2305 { 2306 struct i915_power_domains *power_domains = &i915->power_domains; 2307 struct intel_runtime_pm *rpm = &i915->runtime_pm; 2308 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); 2309 2310 mutex_lock(&power_domains->lock); 2311 2312 if (power_domains->domain_use_count[domain] > 1) { 2313 __intel_display_power_put_domain(i915, domain); 2314 2315 goto out_verify; 2316 } 2317 2318 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); 2319 2320 /* Let a pending work requeue itself or queue a new one. */ 2321 if (power_domains->async_put_wakeref) { 2322 power_domains->async_put_domains[1] |= BIT_ULL(domain); 2323 } else { 2324 power_domains->async_put_domains[0] |= BIT_ULL(domain); 2325 queue_async_put_domains_work(power_domains, 2326 fetch_and_zero(&work_wakeref)); 2327 } 2328 2329 out_verify: 2330 verify_async_put_domains_state(power_domains); 2331 2332 mutex_unlock(&power_domains->lock); 2333 2334 if (work_wakeref) 2335 intel_runtime_pm_put_raw(rpm, work_wakeref); 2336 2337 intel_runtime_pm_put(rpm, wakeref); 2338 } 2339 2340 /** 2341 * intel_display_power_flush_work - flushes the async display power disabling work 2342 * @i915: i915 device instance 2343 * 2344 * Flushes any pending work that was scheduled by a preceding 2345 * intel_display_power_put_async() call, completing the disabling of the 2346 * corresponding power domains. 2347 * 2348 * Note that the work handler function may still be running after this 2349 * function returns; to ensure that the work handler isn't running use 2350 * intel_display_power_flush_work_sync() instead. 2351 */ 2352 void intel_display_power_flush_work(struct drm_i915_private *i915) 2353 { 2354 struct i915_power_domains *power_domains = &i915->power_domains; 2355 intel_wakeref_t work_wakeref; 2356 2357 mutex_lock(&power_domains->lock); 2358 2359 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2360 if (!work_wakeref) 2361 goto out_verify; 2362 2363 release_async_put_domains(power_domains, 2364 async_put_domains_mask(power_domains)); 2365 cancel_delayed_work(&power_domains->async_put_work); 2366 2367 out_verify: 2368 verify_async_put_domains_state(power_domains); 2369 2370 mutex_unlock(&power_domains->lock); 2371 2372 if (work_wakeref) 2373 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); 2374 } 2375 2376 /** 2377 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work 2378 * @i915: i915 device instance 2379 * 2380 * Like intel_display_power_flush_work(), but also ensure that the work 2381 * handler function is not running any more when this function returns. 
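 *
 * A teardown path would typically pair the two as follows (illustrative
 * sketch only):
 *
 *	intel_display_power_put_async(i915, domain, wakeref);
 *	...
 *	intel_display_power_flush_work_sync(i915);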
2382 */
2383 static void
2384 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2385 {
2386 struct i915_power_domains *power_domains = &i915->power_domains;
2387
2388 intel_display_power_flush_work(i915);
2389 cancel_delayed_work_sync(&power_domains->async_put_work);
2390
2391 verify_async_put_domains_state(power_domains);
2392
2393 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2394 }
2395
2396 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2397 /**
2398 * intel_display_power_put - release a power domain reference
2399 * @dev_priv: i915 device instance
2400 * @domain: power domain to release the reference for
2401 * @wakeref: wakeref acquired for the reference that is being released
2402 *
2403 * This function drops the power domain reference obtained by
2404 * intel_display_power_get() and might power down the corresponding hardware
2405 * block right away if this is the last reference.
2406 */
2407 void intel_display_power_put(struct drm_i915_private *dev_priv,
2408 enum intel_display_power_domain domain,
2409 intel_wakeref_t wakeref)
2410 {
2411 __intel_display_power_put(dev_priv, domain);
2412 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2413 }
2414 #else
2415 /**
2416 * intel_display_power_put_unchecked - release an unchecked power domain reference
2417 * @dev_priv: i915 device instance
2418 * @domain: power domain to release the reference for
2419 *
2420 * This function drops the power domain reference obtained by
2421 * intel_display_power_get() and might power down the corresponding hardware
2422 * block right away if this is the last reference.
2423 *
2424 * This function is only for the power domain code's internal use to suppress
2425 * wakeref tracking when the corresponding debug kconfig option is disabled;
2426 * it should not be used otherwise.
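 *
 * (When CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled, the wakeref-tracking
 * intel_display_power_put() variant above is compiled in instead.)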
2427 */ 2428 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 2429 enum intel_display_power_domain domain) 2430 { 2431 __intel_display_power_put(dev_priv, domain); 2432 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); 2433 } 2434 #endif 2435 2436 void 2437 intel_display_power_get_in_set(struct drm_i915_private *i915, 2438 struct intel_display_power_domain_set *power_domain_set, 2439 enum intel_display_power_domain domain) 2440 { 2441 intel_wakeref_t __maybe_unused wf; 2442 2443 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2444 2445 wf = intel_display_power_get(i915, domain); 2446 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2447 power_domain_set->wakerefs[domain] = wf; 2448 #endif 2449 power_domain_set->mask |= BIT_ULL(domain); 2450 } 2451 2452 bool 2453 intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, 2454 struct intel_display_power_domain_set *power_domain_set, 2455 enum intel_display_power_domain domain) 2456 { 2457 intel_wakeref_t wf; 2458 2459 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2460 2461 wf = intel_display_power_get_if_enabled(i915, domain); 2462 if (!wf) 2463 return false; 2464 2465 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2466 power_domain_set->wakerefs[domain] = wf; 2467 #endif 2468 power_domain_set->mask |= BIT_ULL(domain); 2469 2470 return true; 2471 } 2472 2473 void 2474 intel_display_power_put_mask_in_set(struct drm_i915_private *i915, 2475 struct intel_display_power_domain_set *power_domain_set, 2476 u64 mask) 2477 { 2478 enum intel_display_power_domain domain; 2479 2480 drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask); 2481 2482 for_each_power_domain(domain, mask) { 2483 intel_wakeref_t __maybe_unused wf = -1; 2484 2485 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2486 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); 2487 #endif 2488 intel_display_power_put(i915, domain, wf); 2489 power_domain_set->mask &= ~BIT_ULL(domain); 2490 } 2491 } 2492 2493 #define I830_PIPES_POWER_DOMAINS ( \ 2494 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2495 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2496 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2497 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2498 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2499 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2500 BIT_ULL(POWER_DOMAIN_INIT)) 2501 2502 #define VLV_DISPLAY_POWER_DOMAINS ( \ 2503 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2504 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2505 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2506 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2507 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2508 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2509 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2510 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2511 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2512 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2513 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2514 BIT_ULL(POWER_DOMAIN_VGA) | \ 2515 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2516 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2517 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2518 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2519 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2520 BIT_ULL(POWER_DOMAIN_INIT)) 2521 2522 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2523 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2524 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2525 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2526 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2527 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2528 BIT_ULL(POWER_DOMAIN_INIT)) 2529 2530 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ 2531 
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2532 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2533 BIT_ULL(POWER_DOMAIN_INIT)) 2534 2535 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ 2536 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2537 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2538 BIT_ULL(POWER_DOMAIN_INIT)) 2539 2540 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ 2541 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2542 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2543 BIT_ULL(POWER_DOMAIN_INIT)) 2544 2545 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ 2546 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2547 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2548 BIT_ULL(POWER_DOMAIN_INIT)) 2549 2550 #define CHV_DISPLAY_POWER_DOMAINS ( \ 2551 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2552 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2553 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2554 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2555 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2556 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2557 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2558 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2559 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2560 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2561 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2562 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2563 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2564 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2565 BIT_ULL(POWER_DOMAIN_VGA) | \ 2566 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2567 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2568 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2569 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2570 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2571 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2572 BIT_ULL(POWER_DOMAIN_INIT)) 2573 2574 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2575 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2576 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2577 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2578 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2579 BIT_ULL(POWER_DOMAIN_INIT)) 2580 2581 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ 2582 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2583 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2584 BIT_ULL(POWER_DOMAIN_INIT)) 2585 2586 #define HSW_DISPLAY_POWER_DOMAINS ( \ 2587 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2588 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2589 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2590 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2591 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2592 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2593 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2594 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2595 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2596 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2597 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2598 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2599 BIT_ULL(POWER_DOMAIN_VGA) | \ 2600 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2601 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2602 BIT_ULL(POWER_DOMAIN_INIT)) 2603 2604 #define BDW_DISPLAY_POWER_DOMAINS ( \ 2605 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2606 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2607 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2608 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2609 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2610 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2611 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2612 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2613 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2614 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2615 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2616 BIT_ULL(POWER_DOMAIN_VGA) | \ 2617 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2618 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2619 BIT_ULL(POWER_DOMAIN_INIT)) 2620 2621 #define 
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2622 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2623 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2624 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2625 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2626 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2627 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2628 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2629 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2630 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2631 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2632 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2633 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2634 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2635 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2636 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2637 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2638 BIT_ULL(POWER_DOMAIN_VGA) | \ 2639 BIT_ULL(POWER_DOMAIN_INIT)) 2640 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ 2641 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2642 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ 2643 BIT_ULL(POWER_DOMAIN_INIT)) 2644 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2645 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2646 BIT_ULL(POWER_DOMAIN_INIT)) 2647 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2648 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2649 BIT_ULL(POWER_DOMAIN_INIT)) 2650 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ 2651 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2652 BIT_ULL(POWER_DOMAIN_INIT)) 2653 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2654 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2655 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2656 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2657 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2658 BIT_ULL(POWER_DOMAIN_INIT)) 2659 2660 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2661 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2662 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2663 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2664 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2665 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2666 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2667 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2668 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2669 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2670 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2671 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2672 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2673 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2674 BIT_ULL(POWER_DOMAIN_VGA) | \ 2675 BIT_ULL(POWER_DOMAIN_INIT)) 2676 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2677 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2678 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2679 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2680 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2681 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2682 BIT_ULL(POWER_DOMAIN_INIT)) 2683 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ 2684 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2685 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2686 BIT_ULL(POWER_DOMAIN_INIT)) 2687 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ 2688 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2689 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2690 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2691 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2692 BIT_ULL(POWER_DOMAIN_INIT)) 2693 2694 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2695 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2696 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2697 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2698 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2699 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2700 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2701 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2702 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2703 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2704 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2705 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 
2706 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2707 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2708 BIT_ULL(POWER_DOMAIN_VGA) | \ 2709 BIT_ULL(POWER_DOMAIN_INIT)) 2710 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ 2711 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2712 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2713 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2714 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2715 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2716 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ 2717 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2718 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2719 BIT_ULL(POWER_DOMAIN_INIT)) 2720 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ 2721 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2722 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2723 BIT_ULL(POWER_DOMAIN_INIT)) 2724 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ 2725 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2726 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2727 BIT_ULL(POWER_DOMAIN_INIT)) 2728 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2729 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2730 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2731 BIT_ULL(POWER_DOMAIN_INIT)) 2732 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2733 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2734 BIT_ULL(POWER_DOMAIN_INIT)) 2735 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2736 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2737 BIT_ULL(POWER_DOMAIN_INIT)) 2738 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2739 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2740 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2741 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2742 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2743 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2744 BIT_ULL(POWER_DOMAIN_INIT)) 2745 2746 /* 2747 * ICL PW_0/PG_0 domains (HW/DMC control): 2748 * - PCI 2749 * - clocks except port PLL 2750 * - central power except FBC 2751 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers 2752 * ICL PW_1/PG_1 domains (HW/DMC control): 2753 * - DBUF function 2754 * - PIPE_A and its planes, except VGA 2755 * - transcoder EDP + PSR 2756 * - transcoder DSI 2757 * - DDI_A 2758 * - FBC 2759 */ 2760 #define ICL_PW_4_POWER_DOMAINS ( \ 2761 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2762 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2763 BIT_ULL(POWER_DOMAIN_INIT)) 2764 /* VDSC/joining */ 2765 #define ICL_PW_3_POWER_DOMAINS ( \ 2766 ICL_PW_4_POWER_DOMAINS | \ 2767 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2768 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2769 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2770 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2771 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2772 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2773 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2774 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2775 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2776 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2777 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2778 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2779 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2780 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2781 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2782 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ 2783 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2784 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2785 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2786 BIT_ULL(POWER_DOMAIN_VGA) | \ 2787 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2788 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2789 BIT_ULL(POWER_DOMAIN_INIT)) 2790 /* 2791 * - transcoder WD 2792 * - KVMR (HW control) 2793 */ 2794 #define ICL_PW_2_POWER_DOMAINS ( \ 2795 ICL_PW_3_POWER_DOMAINS | \ 2796 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2797 BIT_ULL(POWER_DOMAIN_INIT)) 2798 /* 2799 * - KVMR (HW control) 2800 */ 2801 #define 
ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2802 ICL_PW_2_POWER_DOMAINS | \ 2803 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2804 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2805 BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \ 2806 BIT_ULL(POWER_DOMAIN_INIT)) 2807 2808 #define ICL_DDI_IO_A_POWER_DOMAINS ( \ 2809 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2810 #define ICL_DDI_IO_B_POWER_DOMAINS ( \ 2811 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2812 #define ICL_DDI_IO_C_POWER_DOMAINS ( \ 2813 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2814 #define ICL_DDI_IO_D_POWER_DOMAINS ( \ 2815 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2816 #define ICL_DDI_IO_E_POWER_DOMAINS ( \ 2817 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2818 #define ICL_DDI_IO_F_POWER_DOMAINS ( \ 2819 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2820 2821 #define ICL_AUX_A_IO_POWER_DOMAINS ( \ 2822 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2823 BIT_ULL(POWER_DOMAIN_AUX_A)) 2824 #define ICL_AUX_B_IO_POWER_DOMAINS ( \ 2825 BIT_ULL(POWER_DOMAIN_AUX_B)) 2826 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ 2827 BIT_ULL(POWER_DOMAIN_AUX_C)) 2828 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ 2829 BIT_ULL(POWER_DOMAIN_AUX_D)) 2830 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ 2831 BIT_ULL(POWER_DOMAIN_AUX_E)) 2832 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ 2833 BIT_ULL(POWER_DOMAIN_AUX_F)) 2834 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ 2835 BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) 2836 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ 2837 BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) 2838 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ 2839 BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) 2840 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ 2841 BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) 2842 2843 #define TGL_PW_5_POWER_DOMAINS ( \ 2844 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 2845 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 2846 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 2847 BIT_ULL(POWER_DOMAIN_INIT)) 2848 2849 #define TGL_PW_4_POWER_DOMAINS ( \ 2850 TGL_PW_5_POWER_DOMAINS | \ 2851 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2852 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2853 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2854 BIT_ULL(POWER_DOMAIN_INIT)) 2855 2856 #define TGL_PW_3_POWER_DOMAINS ( \ 2857 TGL_PW_4_POWER_DOMAINS | \ 2858 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2859 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2860 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2861 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 2862 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 2863 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ 2864 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ 2865 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \ 2866 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \ 2867 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 2868 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 2869 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 2870 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 2871 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ 2872 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ 2873 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 2874 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 2875 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 2876 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 2877 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ 2878 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ 2879 BIT_ULL(POWER_DOMAIN_VGA) | \ 2880 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2881 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2882 BIT_ULL(POWER_DOMAIN_INIT)) 2883 2884 #define TGL_PW_2_POWER_DOMAINS ( \ 2885 TGL_PW_3_POWER_DOMAINS | \ 2886 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2887 BIT_ULL(POWER_DOMAIN_INIT)) 2888 2889 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2890 TGL_PW_3_POWER_DOMAINS | \ 2891 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2892 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2893 
BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2894 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2895 BIT_ULL(POWER_DOMAIN_INIT)) 2896 2897 #define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) 2898 #define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) 2899 #define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) 2900 #define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) 2901 #define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5) 2902 #define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6) 2903 2904 #define TGL_AUX_A_IO_POWER_DOMAINS ( \ 2905 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2906 BIT_ULL(POWER_DOMAIN_AUX_A)) 2907 #define TGL_AUX_B_IO_POWER_DOMAINS ( \ 2908 BIT_ULL(POWER_DOMAIN_AUX_B)) 2909 #define TGL_AUX_C_IO_POWER_DOMAINS ( \ 2910 BIT_ULL(POWER_DOMAIN_AUX_C)) 2911 2912 #define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) 2913 #define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) 2914 #define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) 2915 #define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) 2916 #define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5) 2917 #define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6) 2918 2919 #define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) 2920 #define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) 2921 #define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) 2922 #define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) 2923 #define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5) 2924 #define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6) 2925 2926 #define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ 2927 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 2928 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 2929 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 2930 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 2931 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ 2932 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ 2933 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 2934 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 2935 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 2936 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 2937 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ 2938 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ 2939 BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) 2940 2941 #define RKL_PW_4_POWER_DOMAINS ( \ 2942 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2943 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2944 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2945 BIT_ULL(POWER_DOMAIN_INIT)) 2946 2947 #define RKL_PW_3_POWER_DOMAINS ( \ 2948 RKL_PW_4_POWER_DOMAINS | \ 2949 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2950 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2951 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2952 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2953 BIT_ULL(POWER_DOMAIN_VGA) | \ 2954 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2955 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 2956 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 2957 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 2958 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 2959 BIT_ULL(POWER_DOMAIN_INIT)) 2960 2961 /* 2962 * There is no PW_2/PG_2 on RKL. 
2963 * 2964 * RKL PW_1/PG_1 domains (under HW/DMC control): 2965 * - DBUF function (note: registers are in PW0) 2966 * - PIPE_A and its planes and VDSC/joining, except VGA 2967 * - transcoder A 2968 * - DDI_A and DDI_B 2969 * - FBC 2970 * 2971 * RKL PW_0/PG_0 domains (under HW/DMC control): 2972 * - PCI 2973 * - clocks except port PLL 2974 * - shared functions: 2975 * * interrupts except pipe interrupts 2976 * * MBus except PIPE_MBUS_DBOX_CTL 2977 * * DBUF registers 2978 * - central power except FBC 2979 * - top-level GTC (DDI-level GTC is in the well associated with the DDI) 2980 */ 2981 2982 #define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2983 RKL_PW_3_POWER_DOMAINS | \ 2984 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2985 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2986 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2987 BIT_ULL(POWER_DOMAIN_INIT)) 2988 2989 /* 2990 * DG1 onwards Audio MMIO/VERBS lies in PG0 power well. 2991 */ 2992 #define DG1_PW_3_POWER_DOMAINS ( \ 2993 TGL_PW_4_POWER_DOMAINS | \ 2994 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2995 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2996 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2997 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 2998 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 2999 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3000 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3001 BIT_ULL(POWER_DOMAIN_VGA) | \ 3002 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 3003 BIT_ULL(POWER_DOMAIN_INIT)) 3004 3005 #define DG1_PW_2_POWER_DOMAINS ( \ 3006 DG1_PW_3_POWER_DOMAINS | \ 3007 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 3008 BIT_ULL(POWER_DOMAIN_INIT)) 3009 3010 #define DG1_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3011 DG1_PW_3_POWER_DOMAINS | \ 3012 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 3013 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3014 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3015 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3016 BIT_ULL(POWER_DOMAIN_INIT)) 3017 3018 /* 3019 * XE_LPD Power Domains 3020 * 3021 * Previous platforms required that PG(n-1) be enabled before PG(n). That 3022 * dependency chain turns into a dependency tree on XE_LPD: 3023 * 3024 * PG0 3025 * | 3026 * --PG1-- 3027 * / \ 3028 * PGA --PG2-- 3029 * / | \ 3030 * PGB PGC PGD 3031 * 3032 * Power wells must be enabled from top to bottom and disabled from bottom 3033 * to top. This allows pipes to be power gated independently. 
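 *
 * For example, powering up pipe C's well means enabling PG1, then PG2,
 * then PGC (PG0 being under HW/DMC control), and the same wells must be
 * disabled again in the reverse order.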
3034 */
3035
3036 #define XELPD_PW_D_POWER_DOMAINS ( \
3037 BIT_ULL(POWER_DOMAIN_PIPE_D) | \
3038 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
3039 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
3040 BIT_ULL(POWER_DOMAIN_INIT))
3041
3042 #define XELPD_PW_C_POWER_DOMAINS ( \
3043 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
3044 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
3045 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
3046 BIT_ULL(POWER_DOMAIN_INIT))
3047
3048 #define XELPD_PW_B_POWER_DOMAINS ( \
3049 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
3050 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
3051 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
3052 BIT_ULL(POWER_DOMAIN_INIT))
3053
3054 #define XELPD_PW_A_POWER_DOMAINS ( \
3055 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
3056 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
3057 BIT_ULL(POWER_DOMAIN_INIT))
3058
3059 #define XELPD_PW_2_POWER_DOMAINS ( \
3060 XELPD_PW_B_POWER_DOMAINS | \
3061 XELPD_PW_C_POWER_DOMAINS | \
3062 XELPD_PW_D_POWER_DOMAINS | \
3063 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
3064 BIT_ULL(POWER_DOMAIN_VGA) | \
3065 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
3066 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \
3067 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \
3068 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
3069 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
3070 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
3071 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \
3072 BIT_ULL(POWER_DOMAIN_AUX_C) | \
3073 BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \
3074 BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \
3075 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
3076 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
3077 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
3078 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
3079 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
3080 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
3081 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
3082 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
3083 BIT_ULL(POWER_DOMAIN_INIT))
3084
3085 /*
3086 * XELPD PW_1/PG_1 domains (under HW/DMC control):
3087 * - DBUF function (registers are in PW0)
3088 * - Transcoder A
3089 * - DDI_A and DDI_B
3090 *
3091 * XELPD PW_0/PG_0 domains (under HW/DMC control):
3092 * - PCI
3093 * - Clocks except port PLL
3094 * - Shared functions:
3095 * * interrupts except pipe interrupts
3096 * * MBus except PIPE_MBUS_DBOX_CTL
3097 * * DBUF registers
3098 * - Central power except FBC
3099 * - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
3100 */
3101
3102 #define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \
3103 XELPD_PW_2_POWER_DOMAINS | \
3104 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
3105 BIT_ULL(POWER_DOMAIN_MODESET) | \
3106 BIT_ULL(POWER_DOMAIN_AUX_A) | \
3107 BIT_ULL(POWER_DOMAIN_AUX_B) | \
3108 BIT_ULL(POWER_DOMAIN_INIT))
3109
3110 #define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD)
3111 #define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD)
3112 #define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1)
3113 #define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2)
3114 #define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3)
3115 #define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4)
3116
3117 #define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1)
3118 #define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2)
3119 #define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3)
3120 #define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4)
3121
3122 #define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS
BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD) 3123 #define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD) 3124 #define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) 3125 #define XELPD_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) 3126 #define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) 3127 #define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) 3128 3129 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 3130 .sync_hw = i9xx_power_well_sync_hw_noop, 3131 .enable = i9xx_always_on_power_well_noop, 3132 .disable = i9xx_always_on_power_well_noop, 3133 .is_enabled = i9xx_always_on_power_well_enabled, 3134 }; 3135 3136 static const struct i915_power_well_ops chv_pipe_power_well_ops = { 3137 .sync_hw = chv_pipe_power_well_sync_hw, 3138 .enable = chv_pipe_power_well_enable, 3139 .disable = chv_pipe_power_well_disable, 3140 .is_enabled = chv_pipe_power_well_enabled, 3141 }; 3142 3143 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { 3144 .sync_hw = i9xx_power_well_sync_hw_noop, 3145 .enable = chv_dpio_cmn_power_well_enable, 3146 .disable = chv_dpio_cmn_power_well_disable, 3147 .is_enabled = vlv_power_well_enabled, 3148 }; 3149 3150 static const struct i915_power_well_desc i9xx_always_on_power_well[] = { 3151 { 3152 .name = "always-on", 3153 .always_on = true, 3154 .domains = POWER_DOMAIN_MASK, 3155 .ops = &i9xx_always_on_power_well_ops, 3156 .id = DISP_PW_ID_NONE, 3157 }, 3158 }; 3159 3160 static const struct i915_power_well_ops i830_pipes_power_well_ops = { 3161 .sync_hw = i830_pipes_power_well_sync_hw, 3162 .enable = i830_pipes_power_well_enable, 3163 .disable = i830_pipes_power_well_disable, 3164 .is_enabled = i830_pipes_power_well_enabled, 3165 }; 3166 3167 static const struct i915_power_well_desc i830_power_wells[] = { 3168 { 3169 .name = "always-on", 3170 .always_on = true, 3171 .domains = POWER_DOMAIN_MASK, 3172 .ops = &i9xx_always_on_power_well_ops, 3173 .id = DISP_PW_ID_NONE, 3174 }, 3175 { 3176 .name = "pipes", 3177 .domains = I830_PIPES_POWER_DOMAINS, 3178 .ops = &i830_pipes_power_well_ops, 3179 .id = DISP_PW_ID_NONE, 3180 }, 3181 }; 3182 3183 static const struct i915_power_well_ops hsw_power_well_ops = { 3184 .sync_hw = hsw_power_well_sync_hw, 3185 .enable = hsw_power_well_enable, 3186 .disable = hsw_power_well_disable, 3187 .is_enabled = hsw_power_well_enabled, 3188 }; 3189 3190 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { 3191 .sync_hw = i9xx_power_well_sync_hw_noop, 3192 .enable = gen9_dc_off_power_well_enable, 3193 .disable = gen9_dc_off_power_well_disable, 3194 .is_enabled = gen9_dc_off_power_well_enabled, 3195 }; 3196 3197 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { 3198 .sync_hw = i9xx_power_well_sync_hw_noop, 3199 .enable = bxt_dpio_cmn_power_well_enable, 3200 .disable = bxt_dpio_cmn_power_well_disable, 3201 .is_enabled = bxt_dpio_cmn_power_well_enabled, 3202 }; 3203 3204 static const struct i915_power_well_regs hsw_power_well_regs = { 3205 .bios = HSW_PWR_WELL_CTL1, 3206 .driver = HSW_PWR_WELL_CTL2, 3207 .kvmr = HSW_PWR_WELL_CTL3, 3208 .debug = HSW_PWR_WELL_CTL4, 3209 }; 3210 3211 static const struct i915_power_well_desc hsw_power_wells[] = { 3212 { 3213 .name = "always-on", 3214 .always_on = true, 3215 .domains = POWER_DOMAIN_MASK, 3216 .ops = &i9xx_always_on_power_well_ops, 3217 .id = DISP_PW_ID_NONE, 3218 }, 3219 { 3220 .name = "display", 3221 .domains = 
HSW_DISPLAY_POWER_DOMAINS, 3222 .ops = &hsw_power_well_ops, 3223 .id = HSW_DISP_PW_GLOBAL, 3224 { 3225 .hsw.regs = &hsw_power_well_regs, 3226 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3227 .hsw.has_vga = true, 3228 }, 3229 }, 3230 }; 3231 3232 static const struct i915_power_well_desc bdw_power_wells[] = { 3233 { 3234 .name = "always-on", 3235 .always_on = true, 3236 .domains = POWER_DOMAIN_MASK, 3237 .ops = &i9xx_always_on_power_well_ops, 3238 .id = DISP_PW_ID_NONE, 3239 }, 3240 { 3241 .name = "display", 3242 .domains = BDW_DISPLAY_POWER_DOMAINS, 3243 .ops = &hsw_power_well_ops, 3244 .id = HSW_DISP_PW_GLOBAL, 3245 { 3246 .hsw.regs = &hsw_power_well_regs, 3247 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3248 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3249 .hsw.has_vga = true, 3250 }, 3251 }, 3252 }; 3253 3254 static const struct i915_power_well_ops vlv_display_power_well_ops = { 3255 .sync_hw = i9xx_power_well_sync_hw_noop, 3256 .enable = vlv_display_power_well_enable, 3257 .disable = vlv_display_power_well_disable, 3258 .is_enabled = vlv_power_well_enabled, 3259 }; 3260 3261 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { 3262 .sync_hw = i9xx_power_well_sync_hw_noop, 3263 .enable = vlv_dpio_cmn_power_well_enable, 3264 .disable = vlv_dpio_cmn_power_well_disable, 3265 .is_enabled = vlv_power_well_enabled, 3266 }; 3267 3268 static const struct i915_power_well_ops vlv_dpio_power_well_ops = { 3269 .sync_hw = i9xx_power_well_sync_hw_noop, 3270 .enable = vlv_power_well_enable, 3271 .disable = vlv_power_well_disable, 3272 .is_enabled = vlv_power_well_enabled, 3273 }; 3274 3275 static const struct i915_power_well_desc vlv_power_wells[] = { 3276 { 3277 .name = "always-on", 3278 .always_on = true, 3279 .domains = POWER_DOMAIN_MASK, 3280 .ops = &i9xx_always_on_power_well_ops, 3281 .id = DISP_PW_ID_NONE, 3282 }, 3283 { 3284 .name = "display", 3285 .domains = VLV_DISPLAY_POWER_DOMAINS, 3286 .ops = &vlv_display_power_well_ops, 3287 .id = VLV_DISP_PW_DISP2D, 3288 { 3289 .vlv.idx = PUNIT_PWGT_IDX_DISP2D, 3290 }, 3291 }, 3292 { 3293 .name = "dpio-tx-b-01", 3294 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3295 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3296 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3297 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3298 .ops = &vlv_dpio_power_well_ops, 3299 .id = DISP_PW_ID_NONE, 3300 { 3301 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, 3302 }, 3303 }, 3304 { 3305 .name = "dpio-tx-b-23", 3306 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3307 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3308 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3309 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3310 .ops = &vlv_dpio_power_well_ops, 3311 .id = DISP_PW_ID_NONE, 3312 { 3313 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, 3314 }, 3315 }, 3316 { 3317 .name = "dpio-tx-c-01", 3318 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3319 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3320 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3321 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3322 .ops = &vlv_dpio_power_well_ops, 3323 .id = DISP_PW_ID_NONE, 3324 { 3325 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, 3326 }, 3327 }, 3328 { 3329 .name = "dpio-tx-c-23", 3330 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3331 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3332 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3333 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3334 .ops = &vlv_dpio_power_well_ops, 3335 .id = DISP_PW_ID_NONE, 3336 { 3337 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, 3338 }, 3339 }, 3340 { 3341 .name = "dpio-common", 3342 .domains = 
VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};

static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
};

static const struct i915_power_well_desc bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};

static const struct i915_power_well_desc glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DISP_PW_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
};

static const struct i915_power_well_ops icl_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_aux_power_well_enable,
	.disable = icl_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_AUX1,
	.driver = ICL_PWR_WELL_CTL_AUX2,
	.debug = ICL_PWR_WELL_CTL_AUX4,
};

static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_DDI1,
	.driver = ICL_PWR_WELL_CTL_DDI2,
	.debug = ICL_PWR_WELL_CTL_DDI4,
};
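/*
 * Note (added for clarity): starting with the ICL+ tables below, the AUX
 * and DDI wells use the dedicated BIOS/driver/debug request register
 * instances defined above (ICL_PWR_WELL_CTL_AUX1/2/4 and
 * ICL_PWR_WELL_CTL_DDI1/2/4) in place of the shared hsw_power_well_regs
 * instance used by the older platform tables.
 */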
static const struct i915_power_well_desc icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
		},
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C TC1",
		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX D TC2",
		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E TC3",
		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F TC4",
		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX C TBT1",
		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX D TBT2",
		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX E TBT3",
		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX F TBT4",
		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};

static void
tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
{
	u8 tries = 0;
	int ret;

	while (1) {
		u32 low_val;
		u32 high_val = 0;

		if (block)
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
		else
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;

		/*
		 * The spec states that we should time out the request after
		 * 200us, but the function below will time out after 500us.
		 */
		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
					     &high_val);
		if (ret == 0) {
			if (block &&
			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
				ret = -EIO;
			else
				break;
		}

		if (++tries == 3)
			break;

		msleep(1);
	}

	if (ret)
		drm_err(&i915->drm, "TC cold %sblock failed\n",
			block ? "" : "un");
	else
		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
			    block ? "" : "un");
}

static void
tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
				  struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, true);
}

static void
tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
				   struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, false);
}

static void
tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
				   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		tgl_tc_cold_off_power_well_enable(i915, power_well);
	else
		tgl_tc_cold_off_power_well_disable(i915, power_well);
}

static bool
tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	/*
	 * Not the correct implementation, but there is no way to just read
	 * the state back from PCODE, so return the use count to avoid state
	 * mismatch errors.
	 */
	return power_well->count;
}

static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
	.enable = tgl_tc_cold_off_power_well_enable,
	.disable = tgl_tc_cold_off_power_well_disable,
	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};
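/*
 * Illustrative sketch (not code from this file): these ops are not called
 * directly. A user pins the virtual TC cold off well through the power
 * domain API, e.g.:
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_TC_COLD_OFF);
 *	... access the TC port while TCCOLD is blocked ...
 *	intel_display_power_put(i915, POWER_DOMAIN_TC_COLD_OFF, wakeref);
 *
 * which reaches tgl_tc_cold_off_power_well_enable()/_disable() above when
 * the well's use count transitions between 0 and 1.
 */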
static const struct i915_power_well_desc tgl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = TGL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = TGL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		}
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		}
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		}
	},
	{
		.name = "DDI IO TC1",
		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
		},
	},
	{
		.name = "DDI IO TC2",
		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
		},
	},
	{
		.name = "DDI IO TC3",
		.domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
		},
	},
	{
		.name = "DDI IO TC4",
		.domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
		},
	},
	{
		.name = "DDI IO TC5",
		.domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
		},
	},
	{
		.name = "DDI IO TC6",
		.domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
		},
	},
	{
		.name = "TC cold off",
		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
		.ops = &tgl_tc_cold_off_ops,
		.id = TGL_DISP_PW_TC_COLD_OFF,
	},
	{
		.name = "AUX A",
		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX USBC1",
		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX USBC2",
		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX USBC3",
		.domains = TGL_AUX_IO_USBC3_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX USBC4",
		.domains = TGL_AUX_IO_USBC4_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX USBC5",
		.domains = TGL_AUX_IO_USBC5_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX USBC6",
		.domains = TGL_AUX_IO_USBC6_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX TBT1",
		.domains = TGL_AUX_IO_TBT1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT2",
		.domains = TGL_AUX_IO_TBT2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT3",
		.domains = TGL_AUX_IO_TBT3_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT4",
		.domains = TGL_AUX_IO_TBT4_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT5",
		.domains = TGL_AUX_IO_TBT5_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT6",
		.domains = TGL_AUX_IO_TBT6_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = TGL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		}
	},
	{
		.name = "power well 5",
		.domains = TGL_PW_5_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_D),
		},
	},
};

static const struct i915_power_well_desc rkl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 3",
		.domains = RKL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 4",
		.domains = RKL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		}
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		}
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		}
	},
	{
		.name = "DDI IO TC1",
		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
		},
	},
	{
		.name = "DDI IO TC2",
		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX USBC1",
		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
		},
	},
	{
		.name = "AUX USBC2",
		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
		},
	},
};

static const struct i915_power_well_desc dg1_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = DG1_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = DG1_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		}
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		}
	},
	{
		.name = "DDI IO TC1",
		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
		},
	},
	{
		.name = "DDI IO TC2",
		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
		},
	},
	{
		.name = "AUX A",
		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX USBC1",
		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX USBC2",
		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "power well 4",
		.domains = TGL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		}
	},
	{
		.name = "power well 5",
		.domains = TGL_PW_5_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_D),
		},
	},
};

static const struct i915_power_well_desc xelpd_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = XELPD_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well A",
		.domains = XELPD_PW_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_PW_A,
			.hsw.irq_pipe_mask = BIT(PIPE_A),
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well B",
		.domains = XELPD_PW_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_PW_B,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well C",
		.domains = XELPD_PW_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_PW_C,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well D",
		.domains = XELPD_PW_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_PW_D,
			.hsw.irq_pipe_mask = BIT(PIPE_D),
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		}
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		}
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		}
	},
	{
		.name = "DDI IO D_XELPD",
		.domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_DDI_D,
		}
	},
	{
		.name = "DDI IO E_XELPD",
		.domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_DDI_E,
		}
	},
	{
		.name = "DDI IO TC1",
		.domains = XELPD_DDI_IO_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
		}
	},
	{
		.name = "DDI IO TC2",
		.domains = XELPD_DDI_IO_TC2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
		}
	},
	{
		.name = "DDI IO TC3",
		.domains = XELPD_DDI_IO_TC3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
		}
	},
	{
		.name = "DDI IO TC4",
		.domains = XELPD_DDI_IO_TC4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
		}
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
			.hsw.fixed_enable_delay = 600,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
			.hsw.fixed_enable_delay = 600,
		},
	},
	{
		.name = "AUX C",
		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
			.hsw.fixed_enable_delay = 600,
		},
	},
	{
		.name = "AUX D_XELPD",
		.domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_AUX_D,
			.hsw.fixed_enable_delay = 600,
		},
	},
	{
		.name = "AUX E_XELPD",
		.domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = XELPD_PW_CTL_IDX_AUX_E,
		},
	},
	{
		.name = "AUX USBC1",
		.domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
			.hsw.fixed_enable_delay = 600,
		},
	},
	{
		.name = "AUX USBC2",
		.domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
		},
	},
	{
		.name = "AUX USBC3",
		.domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
		},
	},
	{
		.name = "AUX USBC4",
		.domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
		},
	},
	{
		.name = "AUX TBT1",
		.domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT2",
		.domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT3",
		.domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT4",
		.domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	if (IS_DG1(dev_priv))
		max_dc = 3;
	else if (DISPLAY_VER(dev_priv) >= 12)
		max_dc = 4;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		max_dc = 1;
	else if (DISPLAY_VER(dev_priv) >= 9)
		max_dc = 2;
	else
		max_dc = 0;

	/*
	 * DC9 has a separate HW flow from the rest of the DC states,
	 * not depending on the DMC firmware. It's needed by system
	 * suspend/resume, so allow it unconditionally.
	 */
	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
		DISPLAY_VER(dev_priv) >= 11 ?
	       DC_STATE_EN_DC9 : 0;

	if (!dev_priv->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}
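/*
 * Worked example (derived from the code above): on a DISPLAY_VER() == 12
 * platform with power well disabling allowed (disable_power_well != 0)
 * and enable_dc == -1, max_dc is 4 and requested_dc becomes 4, so the
 * returned mask is DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO |
 * DC_STATE_EN_UPTO_DC6.
 */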
static int
__set_power_wells(struct i915_power_domains *power_domains,
		  const struct i915_power_well_desc *power_well_descs,
		  int power_well_descs_sz, u64 skip_mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	u64 power_well_ids = 0;
	int power_well_count = 0;
	int i, plt_idx = 0;

	for (i = 0; i < power_well_descs_sz; i++)
		if (!(BIT_ULL(power_well_descs[i].id) & skip_mask))
			power_well_count++;

	power_domains->power_well_count = power_well_count;
	power_domains->power_wells =
			kcalloc(power_well_count,
				sizeof(*power_domains->power_wells),
				GFP_KERNEL);
	if (!power_domains->power_wells)
		return -ENOMEM;

	for (i = 0; i < power_well_descs_sz; i++) {
		enum i915_power_well_id id = power_well_descs[i].id;

		if (BIT_ULL(id) & skip_mask)
			continue;

		power_domains->power_wells[plt_idx++].desc =
			&power_well_descs[i];

		if (id == DISP_PW_ID_NONE)
			continue;

		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
		power_well_ids |= BIT_ULL(id);
	}

	return 0;
}

#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs), skip_mask)

#define set_power_wells(power_domains, __power_well_descs) \
	set_power_wells_mask(power_domains, __power_well_descs, 0)
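/*
 * Usage sketch (mirrors intel_power_domains_init() below): to reuse the
 * TGL table on a platform lacking the TC cold off well, skip that well
 * by its ID:
 *
 *	err = set_power_wells_mask(power_domains, tgl_power_wells,
 *				   BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
 *
 * skip_mask is keyed on the well IDs, so only wells with a real ID (not
 * DISP_PW_ID_NONE) are meant to be skipped this way.
 */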
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int err;

	dev_priv->params.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   dev_priv->params.disable_power_well);
	dev_priv->dmc.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

	dev_priv->dmc.target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (!HAS_DISPLAY(dev_priv)) {
		power_domains->power_well_count = 0;
		err = 0;
	} else if (DISPLAY_VER(dev_priv) >= 13) {
		err = set_power_wells(power_domains, xelpd_power_wells);
	} else if (IS_DG1(dev_priv)) {
		err = set_power_wells(power_domains, dg1_power_wells);
	} else if (IS_ALDERLAKE_S(dev_priv)) {
		err = set_power_wells_mask(power_domains, tgl_power_wells,
					   BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		err = set_power_wells(power_domains, rkl_power_wells);
	} else if (DISPLAY_VER(dev_priv) == 12) {
		err = set_power_wells(power_domains, tgl_power_wells);
	} else if (DISPLAY_VER(dev_priv) == 11) {
		err = set_power_wells(power_domains, icl_power_wells);
	} else if (IS_GEMINILAKE(dev_priv)) {
		err = set_power_wells(power_domains, glk_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		err = set_power_wells(power_domains, bxt_power_wells);
	} else if (DISPLAY_VER(dev_priv) == 9) {
		err = set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, chv_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		err = set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_HASWELL(dev_priv)) {
		err = set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		err = set_power_wells(power_domains, i830_power_wells);
	} else {
		err = set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return err;
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	kfree(dev_priv->power_domains.power_wells);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->desc->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled =
			power_well->desc->ops->is_enabled(dev_priv, power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, enabledisable(enable));
}

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * This might run in parallel to gen9_dc_off_power_well_enable
	 * being called from intel_dp_detect, for instance, which would
	 * trigger an assertion due to a race: gen9_assert_dbuf_enabled
	 * might preempt this while the registers were already updated but
	 * dev_priv was not.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
	 * Just power up at least one slice; we will figure out later
	 * which slices we have and what we need.
	 */
	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
				dev_priv->dbuf.enabled_slices);
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	gen9_dbuf_slices_update(dev_priv, 0);
}
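/*
 * Usage sketch (hypothetical caller): request exactly the first two
 * slices, powering any others down:
 *
 *	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | BIT(DBUF_S2));
 *
 * Requesting a slice outside INTEL_INFO(dev_priv)->dbuf.slice_mask
 * triggers the drm_WARN() above.
 */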
5293 */ 5294 if (DISPLAY_VER(dev_priv) == 12) 5295 abox_regs |= BIT(0); 5296 5297 for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) 5298 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val); 5299 } 5300 5301 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 5302 { 5303 u32 val = intel_de_read(dev_priv, LCPLL_CTL); 5304 5305 /* 5306 * The LCPLL register should be turned on by the BIOS. For now 5307 * let's just check its state and print errors in case 5308 * something is wrong. Don't even try to turn it on. 5309 */ 5310 5311 if (val & LCPLL_CD_SOURCE_FCLK) 5312 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); 5313 5314 if (val & LCPLL_PLL_DISABLE) 5315 drm_err(&dev_priv->drm, "LCPLL is disabled\n"); 5316 5317 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 5318 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); 5319 } 5320 5321 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 5322 { 5323 struct drm_device *dev = &dev_priv->drm; 5324 struct intel_crtc *crtc; 5325 5326 for_each_intel_crtc(dev, crtc) 5327 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 5328 pipe_name(crtc->pipe)); 5329 5330 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), 5331 "Display power well on\n"); 5332 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, 5333 "SPLL enabled\n"); 5334 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 5335 "WRPLL1 enabled\n"); 5336 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 5337 "WRPLL2 enabled\n"); 5338 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, 5339 "Panel power on\n"); 5340 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 5341 "CPU PWM1 enabled\n"); 5342 if (IS_HASWELL(dev_priv)) 5343 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 5344 "CPU PWM2 enabled\n"); 5345 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 5346 "PCH PWM1 enabled\n"); 5347 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 5348 "Utility pin enabled\n"); 5349 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, 5350 "PCH GTC enabled\n"); 5351 5352 /* 5353 * In theory we can still leave IRQs enabled, as long as only the HPD 5354 * interrupts remain enabled. We used to check for that, but since it's 5355 * gen-specific and since we only disable LCPLL after we fully disable 5356 * the interrupts, the check below should be enough. 5357 */ 5358 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 5359 } 5360 5361 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 5362 { 5363 if (IS_HASWELL(dev_priv)) 5364 return intel_de_read(dev_priv, D_COMP_HSW); 5365 else 5366 return intel_de_read(dev_priv, D_COMP_BDW); 5367 } 5368 5369 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 5370 { 5371 if (IS_HASWELL(dev_priv)) { 5372 if (sandybridge_pcode_write(dev_priv, 5373 GEN6_PCODE_WRITE_D_COMP, val)) 5374 drm_dbg_kms(&dev_priv->drm, 5375 "Failed to write to D_COMP\n"); 5376 } else { 5377 intel_de_write(dev_priv, D_COMP_BDW, val); 5378 intel_de_posting_read(dev_priv, D_COMP_BDW); 5379 } 5380 } 5381 5382 /* 5383 * This function implements pieces of two sequences from BSpec: 5384 * - Sequence for display software to disable LCPLL 5385 * - Sequence for display software to allow package C8+ 5386 * The steps implemented here are just the steps that actually touch the LCPLL 5387 * register. 
Callers should take care of disabling all the display engine 5388 * functions, doing the mode unset, fixing interrupts, etc. 5389 */ 5390 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 5391 bool switch_to_fclk, bool allow_power_down) 5392 { 5393 u32 val; 5394 5395 assert_can_disable_lcpll(dev_priv); 5396 5397 val = intel_de_read(dev_priv, LCPLL_CTL); 5398 5399 if (switch_to_fclk) { 5400 val |= LCPLL_CD_SOURCE_FCLK; 5401 intel_de_write(dev_priv, LCPLL_CTL, val); 5402 5403 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & 5404 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 5405 drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); 5406 5407 val = intel_de_read(dev_priv, LCPLL_CTL); 5408 } 5409 5410 val |= LCPLL_PLL_DISABLE; 5411 intel_de_write(dev_priv, LCPLL_CTL, val); 5412 intel_de_posting_read(dev_priv, LCPLL_CTL); 5413 5414 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 5415 drm_err(&dev_priv->drm, "LCPLL still locked\n"); 5416 5417 val = hsw_read_dcomp(dev_priv); 5418 val |= D_COMP_COMP_DISABLE; 5419 hsw_write_dcomp(dev_priv, val); 5420 ndelay(100); 5421 5422 if (wait_for((hsw_read_dcomp(dev_priv) & 5423 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 5424 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); 5425 5426 if (allow_power_down) { 5427 val = intel_de_read(dev_priv, LCPLL_CTL); 5428 val |= LCPLL_POWER_DOWN_ALLOW; 5429 intel_de_write(dev_priv, LCPLL_CTL, val); 5430 intel_de_posting_read(dev_priv, LCPLL_CTL); 5431 } 5432 } 5433 5434 /* 5435 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 5436 * source. 5437 */ 5438 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 5439 { 5440 u32 val; 5441 5442 val = intel_de_read(dev_priv, LCPLL_CTL); 5443 5444 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 5445 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 5446 return; 5447 5448 /* 5449 * Make sure we're not on PC8 state before disabling PC8, otherwise 5450 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 5451 */ 5452 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 5453 5454 if (val & LCPLL_POWER_DOWN_ALLOW) { 5455 val &= ~LCPLL_POWER_DOWN_ALLOW; 5456 intel_de_write(dev_priv, LCPLL_CTL, val); 5457 intel_de_posting_read(dev_priv, LCPLL_CTL); 5458 } 5459 5460 val = hsw_read_dcomp(dev_priv); 5461 val |= D_COMP_COMP_FORCE; 5462 val &= ~D_COMP_COMP_DISABLE; 5463 hsw_write_dcomp(dev_priv, val); 5464 5465 val = intel_de_read(dev_priv, LCPLL_CTL); 5466 val &= ~LCPLL_PLL_DISABLE; 5467 intel_de_write(dev_priv, LCPLL_CTL, val); 5468 5469 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 5470 drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); 5471 5472 if (val & LCPLL_CD_SOURCE_FCLK) { 5473 val = intel_de_read(dev_priv, LCPLL_CTL); 5474 val &= ~LCPLL_CD_SOURCE_FCLK; 5475 intel_de_write(dev_priv, LCPLL_CTL, val); 5476 5477 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & 5478 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 5479 drm_err(&dev_priv->drm, 5480 "Switching back to LCPLL failed\n"); 5481 } 5482 5483 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 5484 5485 intel_update_cdclk(dev_priv); 5486 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); 5487 } 5488 5489 /* 5490 * Package states C8 and deeper are really deep PC states that can only be 5491 * reached when all the devices on the system allow it, so even if the graphics 5492 * device allows PC8+, it doesn't mean the system will actually get to these 5493 * states. 
Our driver only allows PC8+ when going into runtime PM. 5494 * 5495 * The requirements for PC8+ are that all the outputs are disabled, the power 5496 * well is disabled and most interrupts are disabled, and these are also 5497 * requirements for runtime PM. When these conditions are met, we manually do 5498 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 5499 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard 5500 * hang the machine. 5501 * 5502 * When we really reach PC8 or deeper states (not just when we allow it) we lose 5503 * the state of some registers, so when we come back from PC8+ we need to 5504 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 5505 * need to take care of the registers kept by RC6. Notice that this happens even 5506 * if we don't put the device in PCI D3 state (which is what currently happens 5507 * because of the runtime PM support). 5508 * 5509 * For more, read "Display Sequences for Package C8" on the hardware 5510 * documentation. 5511 */ 5512 static void hsw_enable_pc8(struct drm_i915_private *dev_priv) 5513 { 5514 u32 val; 5515 5516 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n"); 5517 5518 if (HAS_PCH_LPT_LP(dev_priv)) { 5519 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 5520 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 5521 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 5522 } 5523 5524 lpt_disable_clkout_dp(dev_priv); 5525 hsw_disable_lcpll(dev_priv, true, true); 5526 } 5527 5528 static void hsw_disable_pc8(struct drm_i915_private *dev_priv) 5529 { 5530 u32 val; 5531 5532 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); 5533 5534 hsw_restore_lcpll(dev_priv); 5535 intel_init_pch_refclk(dev_priv); 5536 5537 if (HAS_PCH_LPT_LP(dev_priv)) { 5538 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 5539 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 5540 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 5541 } 5542 } 5543 5544 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, 5545 bool enable) 5546 { 5547 i915_reg_t reg; 5548 u32 reset_bits, val; 5549 5550 if (IS_IVYBRIDGE(dev_priv)) { 5551 reg = GEN7_MSG_CTL; 5552 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; 5553 } else { 5554 reg = HSW_NDE_RSTWRN_OPT; 5555 reset_bits = RESET_PCH_HANDSHAKE_ENABLE; 5556 } 5557 5558 val = intel_de_read(dev_priv, reg); 5559 5560 if (enable) 5561 val |= reset_bits; 5562 else 5563 val &= ~reset_bits; 5564 5565 intel_de_write(dev_priv, reg, val); 5566 } 5567 5568 static void skl_display_core_init(struct drm_i915_private *dev_priv, 5569 bool resume) 5570 { 5571 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5572 struct i915_power_well *well; 5573 5574 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5575 5576 /* enable PCH reset handshake */ 5577 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 5578 5579 if (!HAS_DISPLAY(dev_priv)) 5580 return; 5581 5582 /* enable PG1 and Misc I/O */ 5583 mutex_lock(&power_domains->lock); 5584 5585 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5586 intel_power_well_enable(dev_priv, well); 5587 5588 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); 5589 intel_power_well_enable(dev_priv, well); 5590 5591 mutex_unlock(&power_domains->lock); 5592 5593 intel_cdclk_init_hw(dev_priv); 5594 5595 gen9_dbuf_enable(dev_priv); 5596 5597 if (resume && intel_dmc_has_payload(dev_priv)) 5598 intel_dmc_load_program(dev_priv); 5599 } 5600 5601 static void skl_display_core_uninit(struct 
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);
}
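
/*
 * As with the SKL pair above, bxt_display_core_uninit() below undoes
 * bxt_display_core_init() in reverse order; the structural differences
 * are the PCH reset handshake kept disabled (no PCH on BXT) and the
 * absence of a MISC IO power well request.
 */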
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};

static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
	int config, i;

	/* BW_BUDDY registers are not used on dGPUs beyond DG1 */
	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
		/* Wa_1409767108:tgl,dg1,adl-s */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
			if (DISPLAY_VER(dev_priv) == 12)
				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
					     BW_BUDDY_TLB_REQ_TIMER_MASK,
					     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}
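
/*
 * Worked example for the lookup above: a 2-channel DDR4 machine that
 * doesn't need Wa_1409767108 matches the tgl_buddy_page_masks[] entry
 * { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
 * so 0x1F is written to BW_BUDDY_PAGE_MASK for every ABOX instance in
 * abox_mask; an unmatched configuration runs off the end to the
 * terminating {} entry (page_mask == 0) and the buddy logic is
 * disabled instead.
 */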
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 * The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 12)
		gen12_dbuf_slices_config(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (IS_DG2(dev_priv))
		intel_snps_phy_wait_for_calibration(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
	if (DISPLAY_VER(dev_priv) >= 12) {
		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
	}

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(dev_priv) >= 13)
		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}

static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 * The AUX IO power wells are toggled on demand, so they are already
	 * disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Disable the combo PHYs. */
	intel_combo_phy_uninit(dev_priv);
}
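
/*
 * Note the asymmetry between the two numbered sequences above: uninit
 * only needs steps for DBUF, CDCLK, PW1 and the combo PHYs, presumably
 * because the MBUS and BW_BUDDY arbiter setup (init steps 6-7) is plain
 * register state that is simply reprogrammed on the next
 * icl_display_core_init().
 */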
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}
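
/*
 * Example of the mask handling above: a readback of 0xf in a port's
 * READY field means all four lanes are up, so the override is left
 * disabled with an all-zero power-down mask; any other value enables
 * PHY_CH_POWER_DOWN_OVRD_EN for that channel and feeds the readback
 * value straight into PHY_CH_POWER_DOWN_OVRD as the lane mask.
 */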
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might already be active, skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}

static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
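
/*
 * The functions below form the high-level lifecycle of the power domain
 * code. A typical flow, pieced together from the kerneldoc pairing
 * requirements that follow:
 *
 *	driver load:    intel_power_domains_init_hw() -> HW readout ->
 *	                intel_power_domains_enable()
 *	system suspend: intel_power_domains_disable() ->
 *	                intel_power_domains_suspend()
 *	system resume:  intel_power_domains_resume() ->
 *	                intel_power_domains_enable()
 *	driver unload:  intel_power_domains_disable() ->
 *	                intel_power_domains_driver_remove()
 */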

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains are referenced or disabled by intel_modeset_readout_hw_state().
 * After that the reference count of each power well must match its HW
 * enabled state, see intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (DISPLAY_VER(i915) == 9) {
		skl_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/*
	 * If the user disabled power well support (disable_power_well=0),
	 * take an extra INIT reference to keep the power wells permanently
	 * enabled.
	 */
	if (!i915->params.disable_power_well) {
		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
		i915->power_domains.disable_wakeref = intel_display_power_get(i915,
									      POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}

/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the on-demand enabling/disabling of the display power wells. Note
 * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
 * toggled only at specific points of the display modeset sequence, thus they
 * are not affected by the intel_power_domains_enable()/disable() calls. The
 * purpose of these functions is to keep the rest of the power wells enabled
 * until the end of display HW readout (which will acquire the power
 * references reflecting the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the on-demand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}
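
/*
 * Illustrative bracketing (simplified from the driver's suspend path):
 * power well toggling is frozen across the state save, e.g.:
 *
 *	intel_power_domains_disable(i915);
 *	... modeset disable / state save ...
 *	intel_power_domains_suspend(i915, suspend_mode);
 */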
/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support, don't manually deinit the power domains. This also means
	 * the DMC firmware will stay active; it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    intel_dmc_has_payload(i915)) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	if (DISPLAY_VER(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
		bxt_display_core_uninit(i915);
	else if (DISPLAY_VER(i915) == 9)
		skl_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}
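
/*
 * Illustrative suspend/resume pairing, per the kerneldoc contracts:
 *
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *	... system sleep ...
 *	intel_power_domains_resume(i915);
 *
 * For S0ix (I915_DRM_SUSPEND_IDLE) on a DC9-less DMC platform, suspend
 * returns early above, display_core_suspended stays false, and resume
 * below takes the branch that only reacquires the init wakeref.
 */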
/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
		power_domains->init_wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg(&i915->drm, "%-25s %d\n",
			power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			drm_dbg(&i915->drm, "  %-23s %d\n",
				intel_display_power_domain_str(domain),
				power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify that the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			drm_err(&i915->drm,
				"power well %s state mismatch (refcount %d/enabled %d)",
				power_well->desc->name,
				power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			drm_err(&i915->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				power_well->desc->name, power_well->count,
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif
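
/*
 * The helpers below select the platform-appropriate deep-idle mechanism:
 * DC9 on DISPLAY_VER >= 11 and GLK/BXT, PC8 on HSW/BDW. The _suspend()/
 * _resume() pair additionally tears down and reinitializes the display
 * core around the DC9 toggle, which the _late/_early pair does not.
 */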
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}

void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}

void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

void intel_display_power_resume(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		bxt_disable_dc9(i915);
		icl_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915)) {
			if (i915->dmc.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(i915);
			else if (i915->dmc.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(i915);
		}
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_disable_dc9(i915);
		bxt_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915) &&
		    (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}