/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "display/intel_crt.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_csr.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_G_LANES:
		return "PORT_DDI_G_LANES";
	case POWER_DOMAIN_PORT_DDI_H_LANES:
		return "PORT_DDI_H_LANES";
	case POWER_DOMAIN_PORT_DDI_I_LANES:
		return "PORT_DDI_I_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_G_IO:
		return "PORT_DDI_G_IO";
	case POWER_DOMAIN_PORT_DDI_H_IO:
		return "PORT_DDI_H_IO";
	case POWER_DOMAIN_PORT_DDI_I_IO:
		return "PORT_DDI_I_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_G:
		return "AUX_G";
	case POWER_DOMAIN_AUX_H:
		return "AUX_H";
	case POWER_DOMAIN_AUX_I:
		return "AUX_I";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_C_TBT:
		return "AUX_C_TBT";
	case POWER_DOMAIN_AUX_D_TBT:
		return "AUX_D_TBT";
	case POWER_DOMAIN_AUX_E_TBT:
		return "AUX_E_TBT";
	case POWER_DOMAIN_AUX_F_TBT:
		return "AUX_F_TBT";
	case POWER_DOMAIN_AUX_G_TBT:
		return "AUX_G_TBT";
	case POWER_DOMAIN_AUX_H_TBT:
		return "AUX_H_TBT";
	case POWER_DOMAIN_AUX_I_TBT:
		return "AUX_I_TBT";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DPLL_DC_OFF:
		return "DPLL_DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	drm_WARN(&dev_priv->drm, !power_well->count,
		 "Use count on power well %s is already zero",
		 power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}
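
/*
 * Usage sketch (illustrative, not additional driver logic): the two helpers
 * above implement edge-triggered refcounting, so only the 0 -> 1 and 1 -> 0
 * transitions touch the hardware:
 *
 *	intel_power_well_get(dev_priv, power_well);	count 0 -> 1, ops->enable()
 *	intel_power_well_get(dev_priv, power_well);	count 1 -> 2, no HW access
 *	intel_power_well_put(dev_priv, power_well);	count 2 -> 1, no HW access
 *	intel_power_well_put(dev_priv, power_well);	count 1 -> 0, ops->disable()
 *
 * The count itself is not atomic; callers are expected to hold
 * power_domains->lock.
 */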
/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
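
/*
 * Usage sketch (illustrative; intel_display_power_get()/put() are the
 * refcounting API declared in intel_display_power.h): state readout may peek
 * with intel_display_power_is_enabled(), but any code that actually touches
 * registers should hold a domain reference instead:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access pipe A registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */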
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
						 ICL_AUX_PW_TO_CH(pw_idx);
}

static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
		       enum aux_ch aux_ch)
{
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);
		if (!dig_port)
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	return dig_port;
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
			    power_well->desc->name);

		drm_WARN_ON(&dev_priv->drm, !timeout_expected);
	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;

	return ret;
}

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    power_well->desc->name,
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		u32 val;

		val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						   SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(dev_priv, pg);
	}

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#define ICL_AUX_PW_TO_PHY(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (DISPLAY_VER(dev_priv) < 12) {
		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
			       val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
		       val & ~ICL_LANE_ENABLE_AUX);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);

	return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
		return;

	if (IS_DISPLAY_VER(dev_priv, 11) && dig_port->tc_legacy_port)
		return;

	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void icl_tc_cold_exit(struct drm_i915_private *i915)
{
	int ret, tries = 0;

	while (1) {
		ret = sandybridge_pcode_write_timeout(i915,
						      ICL_PCODE_EXIT_TCCOLD,
						      0, 250, 1);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		msleep(1);
	}

	/* Spec states that TC cold exit can take up to 1ms to complete */
	if (!ret)
		msleep(1);

	/* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
		    "succeeded");
}
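
/*
 * Sketch of the retry policy above (a restatement, not extra logic): pcode
 * returns -EAGAIN while it cannot yet service the TC cold exit request, so
 * the write is retried up to three times, 1 ms apart, and even a successful
 * request is followed by a 1 ms sleep to cover the worst-case exit latency
 * given in the spec:
 *
 *	attempt 1: -EAGAIN -> msleep(1), retry
 *	attempt 2: 0       -> msleep(1), done
 */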
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
	bool timeout_expected;
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (is_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));

	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down, or if we
	 * need to enable AUX on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
	timeout_expected = is_tbt;
	if (IS_DISPLAY_VER(dev_priv, 11) && dig_port->tc_legacy_port) {
		icl_tc_cold_exit(dev_priv);
		timeout_expected = true;
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);

	if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
			       HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting for TC uC health\n");
	}
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	hsw_power_well_disable(dev_priv, power_well);
}

static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
			  struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
	bool is_tbt = power_well->desc->hsw.is_tc_tbt;

	if (is_tbt || intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_enable(dev_priv,
							   power_well);
	else
		return hsw_power_well_enable(dev_priv, power_well);
}

static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
	bool is_tbt = power_well->desc->hsw.is_tc_tbt;

	if (is_tbt || intel_phy_is_tc(dev_priv, phy))
		return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
	else if (IS_ICELAKE(dev_priv))
		return icl_combo_phy_aux_power_well_disable(dev_priv,
							    power_well);
	else
		return hsw_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(dev_priv, regs->driver);

	/*
	 * On GEN9 big core, due to a DMC bug, the driver's request bits for
	 * PW1 and the MISC_IO PW will not be restored, so check instead for
	 * the BIOS's own request bits, which are forced-on for these power
	 * wells when exiting DC5/6.
	 */
	if (IS_DISPLAY_VER(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(dev_priv, regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(dev_priv, DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning the old value. Make sure
	 * the write really sticks enough times and also force a rewrite
	 * until we are confident that the state is exactly what we want.
	 */
	do {
		v = intel_de_read(dev_priv, DC_STATE_EN);

		if (v != state) {
			intel_de_write(dev_priv, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(&dev_priv->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the time we need only one retry, avoid spam */
	if (rewrites > 1)
		drm_dbg_kms(&dev_priv->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (DISPLAY_VER(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
					  | DC_STATE_EN_DC9;
	else if (IS_DISPLAY_VER(dev_priv, 11))
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEN9_LP(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);

	drm_dbg_kms(&dev_priv->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * enabling it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = intel_de_read(dev_priv, DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}
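
/*
 * Usage sketch (the real call sites are the "DC off" power well ops further
 * below): the driver never forces entry into a DC state, it only moves the
 * ceiling and lets DMC/HW decide when to enter it:
 *
 *	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);	allow DC5/DC6
 *	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);		block DC states
 */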
static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->csr.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}
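
/*
 * Worked example for the fallback chain above: with allowed_dc_mask
 * containing only DC_STATE_EN_UPTO_DC5, a request for DC_STATE_EN_UPTO_DC6
 * steps down one entry and returns DC_STATE_EN_UPTO_DC5; if nothing in the
 * chain is allowed the result degrades all the way to DC_STATE_DISABLE.
 */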
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
	val = intel_de_read(dev_priv, DC_STATE_EN);
	val &= ~DC_STATE_DC3CO_STATUS;
	intel_de_write(dev_priv, DC_STATE_EN, val);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Delay of 200us DC3CO Exit time B.Spec 49196
	 */
	usleep_range(200, 210);
}

static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_pps_reset_all(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      !intel_de_read(dev_priv, CSR_PROGRAM(0)),
		      "CSR program storage start is NULL\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
		      "CSR SSP Base Not fine\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
		      "CSR HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	drm_WARN(&dev_priv->drm, 1,
		 "Power well %d not defined for this platform\n",
		 power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well's target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->csr.target_dc_state)
		goto unlock;

	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
							   power_well);
	/*
	 * If the DC off power well is disabled, we need to enable and
	 * disable it to apply the new target DC state.
	 */
	if (!dc_off_enabled)
		power_well->desc->ops->enable(dev_priv, power_well);

	dev_priv->csr.target_dc_state = state;

	if (!dc_off_enabled)
		power_well->desc->ops->disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
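
/*
 * Usage sketch (hedged: the actual caller lives outside this file): a
 * feature like PSR2 that prefers DC3CO over DC5/DC6 would do
 *
 *	intel_display_power_set_target_dc_state(dev_priv,
 *						DC_STATE_EN_DC3CO);
 *	...
 *	intel_display_power_set_target_dc_state(dev_priv,
 *						DC_STATE_EN_UPTO_DC6);
 *
 * relying on sanitize_target_dc_state() to fall back when the requested
 * state is not in allowed_dc_mask.
 */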
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (DISPLAY_VER(dev_priv) >= 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(&dev_priv->drm,
		      intel_display_power_well_is_enabled(dev_priv, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_csr_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		      "Backlight is not disabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(dev_priv, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(dev_priv, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(dev_priv, regs->driver, drv_req | mask);
		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;

	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
}

static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = {};

	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	switch (dev_priv->csr.target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	intel_de_write(dev_priv, DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be explicitly initialized anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);
	intel_hpd_poll_disable(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_pps_reset_all(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_enable(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
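
/*
 * Note on the ordering above (a restatement, not extra logic): the
 * display-side state in vlv_display_power_well_init() can only be programmed
 * once the punit has powered the well up, and conversely
 * vlv_display_power_well_deinit() must run while the well is still powered,
 * which is why init follows vlv_set_power_well(true) and deinit precedes
 * vlv_set_power_well(false).
 */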
1490 */ 1491 intel_de_write(dev_priv, DPIO_CTL, 1492 intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST); 1493 } 1494 1495 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1496 struct i915_power_well *power_well) 1497 { 1498 enum pipe pipe; 1499 1500 for_each_pipe(dev_priv, pipe) 1501 assert_pll_disabled(dev_priv, pipe); 1502 1503 /* Assert common reset */ 1504 intel_de_write(dev_priv, DPIO_CTL, 1505 intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST); 1506 1507 vlv_set_power_well(dev_priv, power_well, false); 1508 } 1509 1510 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) 1511 1512 #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) 1513 1514 static void assert_chv_phy_status(struct drm_i915_private *dev_priv) 1515 { 1516 struct i915_power_well *cmn_bc = 1517 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 1518 struct i915_power_well *cmn_d = 1519 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 1520 u32 phy_control = dev_priv->chv_phy_control; 1521 u32 phy_status = 0; 1522 u32 phy_status_mask = 0xffffffff; 1523 1524 /* 1525 * The BIOS can leave the PHY is some weird state 1526 * where it doesn't fully power down some parts. 1527 * Disable the asserts until the PHY has been fully 1528 * reset (ie. the power well has been disabled at 1529 * least once). 1530 */ 1531 if (!dev_priv->chv_phy_assert[DPIO_PHY0]) 1532 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | 1533 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | 1534 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | 1535 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | 1536 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | 1537 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); 1538 1539 if (!dev_priv->chv_phy_assert[DPIO_PHY1]) 1540 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | 1541 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | 1542 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); 1543 1544 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { 1545 phy_status |= PHY_POWERGOOD(DPIO_PHY0); 1546 1547 /* this assumes override is only used to enable lanes */ 1548 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) 1549 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); 1550 1551 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) 1552 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); 1553 1554 /* CL1 is on whenever anything is on in either channel */ 1555 if (BITS_SET(phy_control, 1556 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | 1557 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) 1558 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); 1559 1560 /* 1561 * The DPLLB check accounts for the pipe B + port A usage 1562 * with CL2 powered up but all the lanes in the second channel 1563 * powered down. 
1564 */ 1565 if (BITS_SET(phy_control, 1566 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && 1567 (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) 1568 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); 1569 1570 if (BITS_SET(phy_control, 1571 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) 1572 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); 1573 if (BITS_SET(phy_control, 1574 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) 1575 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); 1576 1577 if (BITS_SET(phy_control, 1578 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) 1579 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); 1580 if (BITS_SET(phy_control, 1581 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) 1582 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); 1583 } 1584 1585 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { 1586 phy_status |= PHY_POWERGOOD(DPIO_PHY1); 1587 1588 /* this assumes override is only used to enable lanes */ 1589 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) 1590 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); 1591 1592 if (BITS_SET(phy_control, 1593 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) 1594 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); 1595 1596 if (BITS_SET(phy_control, 1597 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) 1598 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); 1599 if (BITS_SET(phy_control, 1600 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) 1601 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); 1602 } 1603 1604 phy_status &= phy_status_mask; 1605 1606 /* 1607 * The PHY may be busy with some initial calibration and whatnot, 1608 * so the power state can take a while to actually change. 
1609 */ 1610 if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS, 1611 phy_status_mask, phy_status, 10)) 1612 drm_err(&dev_priv->drm, 1613 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", 1614 intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, 1615 phy_status, dev_priv->chv_phy_control); 1616 } 1617 1618 #undef BITS_SET 1619 1620 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 1621 struct i915_power_well *power_well) 1622 { 1623 enum dpio_phy phy; 1624 enum pipe pipe; 1625 u32 tmp; 1626 1627 drm_WARN_ON_ONCE(&dev_priv->drm, 1628 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && 1629 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); 1630 1631 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1632 pipe = PIPE_A; 1633 phy = DPIO_PHY0; 1634 } else { 1635 pipe = PIPE_C; 1636 phy = DPIO_PHY1; 1637 } 1638 1639 /* since ref/cri clock was enabled */ 1640 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 1641 vlv_set_power_well(dev_priv, power_well, true); 1642 1643 /* Poll for phypwrgood signal */ 1644 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, 1645 PHY_POWERGOOD(phy), 1)) 1646 drm_err(&dev_priv->drm, "Display PHY %d is not power up\n", 1647 phy); 1648 1649 vlv_dpio_get(dev_priv); 1650 1651 /* Enable dynamic power down */ 1652 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); 1653 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | 1654 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; 1655 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); 1656 1657 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1658 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); 1659 tmp |= DPIO_DYNPWRDOWNEN_CH1; 1660 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); 1661 } else { 1662 /* 1663 * Force the non-existing CL2 off. BXT does this 1664 * too, so maybe it saves some power even though 1665 * CL2 doesn't exist? 
1666 */ 1667 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); 1668 tmp |= DPIO_CL2_LDOFUSE_PWRENB; 1669 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); 1670 } 1671 1672 vlv_dpio_put(dev_priv); 1673 1674 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); 1675 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1676 dev_priv->chv_phy_control); 1677 1678 drm_dbg_kms(&dev_priv->drm, 1679 "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1680 phy, dev_priv->chv_phy_control); 1681 1682 assert_chv_phy_status(dev_priv); 1683 } 1684 1685 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 1686 struct i915_power_well *power_well) 1687 { 1688 enum dpio_phy phy; 1689 1690 drm_WARN_ON_ONCE(&dev_priv->drm, 1691 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && 1692 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); 1693 1694 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { 1695 phy = DPIO_PHY0; 1696 assert_pll_disabled(dev_priv, PIPE_A); 1697 assert_pll_disabled(dev_priv, PIPE_B); 1698 } else { 1699 phy = DPIO_PHY1; 1700 assert_pll_disabled(dev_priv, PIPE_C); 1701 } 1702 1703 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); 1704 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1705 dev_priv->chv_phy_control); 1706 1707 vlv_set_power_well(dev_priv, power_well, false); 1708 1709 drm_dbg_kms(&dev_priv->drm, 1710 "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1711 phy, dev_priv->chv_phy_control); 1712 1713 /* PHY is fully reset now, so we can enable the PHY state asserts */ 1714 dev_priv->chv_phy_assert[phy] = true; 1715 1716 assert_chv_phy_status(dev_priv); 1717 } 1718 1719 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1720 enum dpio_channel ch, bool override, unsigned int mask) 1721 { 1722 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; 1723 u32 reg, val, expected, actual; 1724 1725 /* 1726 * The BIOS can leave the PHY is some weird state 1727 * where it doesn't fully power down some parts. 1728 * Disable the asserts until the PHY has been fully 1729 * reset (ie. the power well has been disabled at 1730 * least once). 1731 */ 1732 if (!dev_priv->chv_phy_assert[phy]) 1733 return; 1734 1735 if (ch == DPIO_CH0) 1736 reg = _CHV_CMN_DW0_CH0; 1737 else 1738 reg = _CHV_CMN_DW6_CH1; 1739 1740 vlv_dpio_get(dev_priv); 1741 val = vlv_dpio_read(dev_priv, pipe, reg); 1742 vlv_dpio_put(dev_priv); 1743 1744 /* 1745 * This assumes !override is only used when the port is disabled. 1746 * All lanes should power down even without the override when 1747 * the port is disabled. 1748 */ 1749 if (!override || mask == 0xf) { 1750 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1751 /* 1752 * If CH1 common lane is not active anymore 1753 * (eg. for pipe B DPLL) the entire channel will 1754 * shut down, which causes the common lane registers 1755 * to read as 0. That means we can't actually check 1756 * the lane power down status bits, but as the entire 1757 * register reads as 0 it's a good indication that the 1758 * channel is indeed entirely powered down. 
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (i.e. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If the CH1 common lane is not active anymore
		 * (e.g. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	drm_WARN(&dev_priv->drm, actual != expected,
		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
		 !!(actual & DPIO_ALLDL_POWERDOWN),
		 !!(actual & DPIO_ANYDL_POWERDOWN),
		 !!(expected & DPIO_ALLDL_POWERDOWN),
		 !!(expected & DPIO_ANYDL_POWERDOWN),
		 reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		    phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		    phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
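/*
 * Usage sketch for the two helpers above (hypothetical caller; the real call
 * sites live in the CHV PHY/encoder code): an encoder keeps its lanes powered
 * while it programs the PHY and lets them gate dynamically again afterwards.
 *
 *	before programming the PHY, keep all four lanes powered:
 *		chv_phy_powergate_lanes(encoder, true, 0xf);
 *
 *	after the port is fully disabled, allow everything to power down:
 *		chv_phy_powergate_lanes(encoder, false, 0x0);
 */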
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
		    state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
{
	return power_domains->async_put_domains[0] |
	       power_domains->async_put_domains[1];
}
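/*
 * A short note on the async_put bookkeeping used below (summarized from the
 * code in this file): async_put_domains[0] holds the domains that the
 * currently queued delayed work will release, while async_put_domains[1]
 * collects domains that were put asynchronously while that work was already
 * pending. When the work runs it releases bucket [0], then moves bucket [1]
 * into [0] and requeues itself if anything accumulated meanwhile. The two
 * buckets must therefore always stay disjoint.
 */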
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
			    power_domains->async_put_domains[1]);
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
			   !!__async_put_domains_mask(power_domains));

	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
		err |= drm_WARN_ON(&i915->drm,
				   power_domains->domain_use_count[domain] != 1);

	return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, u64 mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	enum intel_display_power_domain domain;

	drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
	for_each_power_domain(domain, mask)
		drm_dbg(&i915->drm, "%s use_count %d\n",
			intel_display_power_domain_str(domain),
			power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
		power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */

static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	return __async_put_domains_mask(power_domains);
}

static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
}

static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool ret = false;

	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	if (async_put_domains_mask(power_domains))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if @domain is
 * already enabled; unlike intel_display_power_get() it does not power the
 * domain up. A successfully obtained reference keeps the domain (and all its
 * parents) powered up until it is released.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns:
 * A valid wakeref if the domain was enabled, 0 otherwise.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return 0;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}
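/*
 * Typical use of intel_display_power_get_if_enabled() (sketch, modelled on
 * the hw state readout code in this driver): skip the readout entirely when
 * the domain is powered down, otherwise do the register access under the
 * reference and drop it again.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv,
 *						     POWER_DOMAIN_PIPE_A);
 *	if (!wakeref)
 *		return false;
 *
 *	... read out pipe hw state ...
 *
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */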
static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);

	power_domains = &dev_priv->power_domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	drm_WARN(&dev_priv->drm,
		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}

static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
						    &power_domains->async_put_work,
						    msecs_to_jiffies(100)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must already hold a raw wakeref; upgrade that to a
	 * proper wakeref to make the state checker happy about the HW access
	 * during power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (power_domains->async_put_domains[1]) {
		power_domains->async_put_domains[0] =
			fetch_and_zero(&power_domains->async_put_domains[1]);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}
/**
 * __intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to release a reference for
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		power_domains->async_put_domains[1] |= BIT_ULL(domain);
	} else {
		power_domains->async_put_domains[0] |= BIT_ULL(domain);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}
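/*
 * Example (sketch): code that toggles a domain at high frequency, e.g.
 * around individual AUX transfers, can use the asynchronous put (via the
 * intel_display_power_put_async() wrapper declared in the header) so that
 * back-to-back accesses within the ~100 ms grace period used by
 * queue_async_put_domains_work() don't power the well down and up again
 * each time:
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
 *	... perform the AUX transfer ...
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 */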
/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  async_put_domains_mask(power_domains));
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensures that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to release a reference for
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to release a reference for
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress
 * wakeref tracking when the corresponding debug kconfig option is disabled
 * and should not be used otherwise.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif

void
intel_display_power_get_in_set(struct drm_i915_private *i915,
			       struct intel_display_power_domain_set *power_domain_set,
			       enum intel_display_power_domain domain)
{
	intel_wakeref_t __maybe_unused wf;

	drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));

	wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	power_domain_set->mask |= BIT_ULL(domain);
}

bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
					  struct intel_display_power_domain_set *power_domain_set,
					  enum intel_display_power_domain domain)
{
	intel_wakeref_t wf;

	drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));

	wf = intel_display_power_get_if_enabled(i915, domain);
	if (!wf)
		return false;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	power_domain_set->mask |= BIT_ULL(domain);

	return true;
}

void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
				    struct intel_display_power_domain_set *power_domain_set,
				    u64 mask)
{
	enum intel_display_power_domain domain;

	drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);

	for_each_power_domain(domain, mask) {
		intel_wakeref_t __maybe_unused wf = -1;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
		intel_display_power_put(i915, domain, wf);
		power_domain_set->mask &= ~BIT_ULL(domain);
	}
}
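/*
 * Sketch of the domain-set helpers above (hypothetical caller): the set
 * tracks which domains are held (and, with wakeref tracking enabled, the
 * per-domain wakerefs), so a group of references can be dropped in one go:
 *
 *	struct intel_display_power_domain_set set = {};
 *
 *	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_PIPE_A);
 *	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_AUDIO);
 *	... use the hardware ...
 *	intel_display_power_put_mask_in_set(i915, &set, set.mask);
 */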
#define I830_PIPES_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define HSW_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/*
 * ICL PW_0/PG_0 domains (HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - central power except FBC
 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
 * ICL PW_1/PG_1 domains (HW/DMC control):
 * - DBUF function
 * - PIPE_A and its planes, except VGA
 * - transcoder EDP + PSR
 * - transcoder DSI
 * - DDI_A
 * - FBC
 */
#define ICL_PW_4_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/* VDSC/joining */
#define ICL_PW_3_POWER_DOMAINS (		\
	ICL_PW_4_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_AUX_E) |		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - transcoder WD
	 * - KVMR (HW control)
	 */
#define ICL_PW_2_POWER_DOMAINS (		\
	ICL_PW_3_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - KVMR (HW control)
	 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	ICL_PW_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define ICL_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

#define ICL_AUX_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_TC1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_TC2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_TC3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_TC4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
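/*
 * Note on the macro structure above (and for the TGL/RKL tables below): the
 * domain mask of each higher-numbered power well includes the mask of the
 * wells nested under it (e.g. ICL_PW_2_POWER_DOMAINS builds on PW_3, which
 * builds on PW_4), reflecting that enabling a well requires the wells above
 * it in the hierarchy to be on, and matching the get/put semantics of only
 * grabbing the innermost domain needed.
 */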
#define TGL_PW_5_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_D) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_4_POWER_DOMAINS (		\
	TGL_PW_5_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_3_POWER_DOMAINS (		\
	TGL_PW_4_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC3) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC4) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC5) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC6) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT5) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT6) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_2_POWER_DOMAINS (		\
	TGL_PW_3_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	TGL_PW_3_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DDI_IO_TC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
#define TGL_DDI_IO_TC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
#define TGL_DDI_IO_TC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
#define TGL_DDI_IO_TC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
#define TGL_DDI_IO_TC5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5)
#define TGL_DDI_IO_TC6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6)

#define TGL_AUX_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define TGL_AUX_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define TGL_AUX_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C))

#define TGL_AUX_IO_USBC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC1)
#define TGL_AUX_IO_USBC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC2)
#define TGL_AUX_IO_USBC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC3)
#define TGL_AUX_IO_USBC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC4)
#define TGL_AUX_IO_USBC5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC5)
#define TGL_AUX_IO_USBC6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC6)

#define TGL_AUX_IO_TBT1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT1)
#define TGL_AUX_IO_TBT2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT2)
#define TGL_AUX_IO_TBT3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT3)
#define TGL_AUX_IO_TBT4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT4)
#define TGL_AUX_IO_TBT5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT5)
#define TGL_AUX_IO_TBT6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT6)

#define TGL_TC_COLD_OFF_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC3) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC4) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC5) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC6) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT5) |	\
	BIT_ULL(POWER_DOMAIN_AUX_TBT6) |	\
	BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))

#define RKL_PW_4_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define RKL_PW_3_POWER_DOMAINS (		\
	RKL_PW_4_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |	\
	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/*
 * There is no PW_2/PG_2 on RKL.
 *
 * RKL PW_1/PG_1 domains (under HW/DMC control):
 * - DBUF function (note: registers are in PW0)
 * - PIPE_A and its planes and VDSC/joining, except VGA
 * - transcoder A
 * - DDI_A and DDI_B
 * - FBC
 *
 * RKL PW_0/PG_0 domains (under HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - shared functions:
 *   * interrupts except pipe interrupts
 *   * MBus except PIPE_MBUS_DBOX_CTL
 *   * DBUF registers
 * - central power except FBC
 * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
 */

#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	RKL_PW_3_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
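/*
 * The tables that follow all share one pattern (a note on the layout, not
 * new behavior): each platform provides an array of i915_power_well_desc
 * entries, and each entry points at an i915_power_well_ops vtable with
 * sync_hw/enable/disable/is_enabled hooks. A hypothetical new platform
 * "xyz" would add its own ops (or reuse e.g. hsw_power_well_ops) plus a
 * descriptor array along these lines:
 *
 *	static const struct i915_power_well_desc xyz_power_wells[] = {
 *		{
 *			.name = "always-on",
 *			.always_on = true,
 *			.domains = POWER_DOMAIN_MASK,
 *			.ops = &i9xx_always_on_power_well_ops,
 *			.id = DISP_PW_ID_NONE,
 *		},
 *		...
 *	};
 */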
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios = HSW_PWR_WELL_CTL1,
	.driver = HSW_PWR_WELL_CTL2,
	.kvmr = HSW_PWR_WELL_CTL3,
	.debug = HSW_PWR_WELL_CTL4,
};

static const struct i915_power_well_desc hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};

static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
};
static const struct i915_power_well_desc bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};

static const struct i915_power_well_desc glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DISP_PW_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
&hsw_power_well_ops, 3533 .id = DISP_PW_ID_NONE, 3534 { 3535 .hsw.regs = &hsw_power_well_regs, 3536 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3537 }, 3538 }, 3539 { 3540 .name = "DDI A IO power well", 3541 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, 3542 .ops = &hsw_power_well_ops, 3543 .id = DISP_PW_ID_NONE, 3544 { 3545 .hsw.regs = &hsw_power_well_regs, 3546 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3547 }, 3548 }, 3549 { 3550 .name = "DDI B IO power well", 3551 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3552 .ops = &hsw_power_well_ops, 3553 .id = DISP_PW_ID_NONE, 3554 { 3555 .hsw.regs = &hsw_power_well_regs, 3556 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3557 }, 3558 }, 3559 { 3560 .name = "DDI C IO power well", 3561 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3562 .ops = &hsw_power_well_ops, 3563 .id = DISP_PW_ID_NONE, 3564 { 3565 .hsw.regs = &hsw_power_well_regs, 3566 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3567 }, 3568 }, 3569 }; 3570 3571 static const struct i915_power_well_desc cnl_power_wells[] = { 3572 { 3573 .name = "always-on", 3574 .always_on = true, 3575 .domains = POWER_DOMAIN_MASK, 3576 .ops = &i9xx_always_on_power_well_ops, 3577 .id = DISP_PW_ID_NONE, 3578 }, 3579 { 3580 .name = "power well 1", 3581 /* Handled by the DMC firmware */ 3582 .always_on = true, 3583 .domains = 0, 3584 .ops = &hsw_power_well_ops, 3585 .id = SKL_DISP_PW_1, 3586 { 3587 .hsw.regs = &hsw_power_well_regs, 3588 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3589 .hsw.has_fuses = true, 3590 }, 3591 }, 3592 { 3593 .name = "AUX A", 3594 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS, 3595 .ops = &hsw_power_well_ops, 3596 .id = DISP_PW_ID_NONE, 3597 { 3598 .hsw.regs = &hsw_power_well_regs, 3599 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3600 }, 3601 }, 3602 { 3603 .name = "AUX B", 3604 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS, 3605 .ops = &hsw_power_well_ops, 3606 .id = DISP_PW_ID_NONE, 3607 { 3608 .hsw.regs = &hsw_power_well_regs, 3609 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3610 }, 3611 }, 3612 { 3613 .name = "AUX C", 3614 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS, 3615 .ops = &hsw_power_well_ops, 3616 .id = DISP_PW_ID_NONE, 3617 { 3618 .hsw.regs = &hsw_power_well_regs, 3619 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3620 }, 3621 }, 3622 { 3623 .name = "AUX D", 3624 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS, 3625 .ops = &hsw_power_well_ops, 3626 .id = DISP_PW_ID_NONE, 3627 { 3628 .hsw.regs = &hsw_power_well_regs, 3629 .hsw.idx = CNL_PW_CTL_IDX_AUX_D, 3630 }, 3631 }, 3632 { 3633 .name = "DC off", 3634 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, 3635 .ops = &gen9_dc_off_power_well_ops, 3636 .id = SKL_DISP_DC_OFF, 3637 }, 3638 { 3639 .name = "power well 2", 3640 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3641 .ops = &hsw_power_well_ops, 3642 .id = SKL_DISP_PW_2, 3643 { 3644 .hsw.regs = &hsw_power_well_regs, 3645 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3646 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3647 .hsw.has_vga = true, 3648 .hsw.has_fuses = true, 3649 }, 3650 }, 3651 { 3652 .name = "DDI A IO power well", 3653 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS, 3654 .ops = &hsw_power_well_ops, 3655 .id = DISP_PW_ID_NONE, 3656 { 3657 .hsw.regs = &hsw_power_well_regs, 3658 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3659 }, 3660 }, 3661 { 3662 .name = "DDI B IO power well", 3663 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS, 3664 .ops = &hsw_power_well_ops, 3665 .id = DISP_PW_ID_NONE, 3666 { 3667 .hsw.regs = &hsw_power_well_regs, 3668 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3669 }, 3670 }, 3671 { 3672 .name = "DDI C IO power well", 3673 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS, 3674 
.ops = &hsw_power_well_ops, 3675 .id = DISP_PW_ID_NONE, 3676 { 3677 .hsw.regs = &hsw_power_well_regs, 3678 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3679 }, 3680 }, 3681 { 3682 .name = "DDI D IO power well", 3683 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS, 3684 .ops = &hsw_power_well_ops, 3685 .id = DISP_PW_ID_NONE, 3686 { 3687 .hsw.regs = &hsw_power_well_regs, 3688 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 3689 }, 3690 }, 3691 { 3692 .name = "DDI F IO power well", 3693 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS, 3694 .ops = &hsw_power_well_ops, 3695 .id = CNL_DISP_PW_DDI_F_IO, 3696 { 3697 .hsw.regs = &hsw_power_well_regs, 3698 .hsw.idx = CNL_PW_CTL_IDX_DDI_F, 3699 }, 3700 }, 3701 { 3702 .name = "AUX F", 3703 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS, 3704 .ops = &hsw_power_well_ops, 3705 .id = CNL_DISP_PW_DDI_F_AUX, 3706 { 3707 .hsw.regs = &hsw_power_well_regs, 3708 .hsw.idx = CNL_PW_CTL_IDX_AUX_F, 3709 }, 3710 }, 3711 }; 3712 3713 static const struct i915_power_well_ops icl_aux_power_well_ops = { 3714 .sync_hw = hsw_power_well_sync_hw, 3715 .enable = icl_aux_power_well_enable, 3716 .disable = icl_aux_power_well_disable, 3717 .is_enabled = hsw_power_well_enabled, 3718 }; 3719 3720 static const struct i915_power_well_regs icl_aux_power_well_regs = { 3721 .bios = ICL_PWR_WELL_CTL_AUX1, 3722 .driver = ICL_PWR_WELL_CTL_AUX2, 3723 .debug = ICL_PWR_WELL_CTL_AUX4, 3724 }; 3725 3726 static const struct i915_power_well_regs icl_ddi_power_well_regs = { 3727 .bios = ICL_PWR_WELL_CTL_DDI1, 3728 .driver = ICL_PWR_WELL_CTL_DDI2, 3729 .debug = ICL_PWR_WELL_CTL_DDI4, 3730 }; 3731 3732 static const struct i915_power_well_desc icl_power_wells[] = { 3733 { 3734 .name = "always-on", 3735 .always_on = true, 3736 .domains = POWER_DOMAIN_MASK, 3737 .ops = &i9xx_always_on_power_well_ops, 3738 .id = DISP_PW_ID_NONE, 3739 }, 3740 { 3741 .name = "power well 1", 3742 /* Handled by the DMC firmware */ 3743 .always_on = true, 3744 .domains = 0, 3745 .ops = &hsw_power_well_ops, 3746 .id = SKL_DISP_PW_1, 3747 { 3748 .hsw.regs = &hsw_power_well_regs, 3749 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3750 .hsw.has_fuses = true, 3751 }, 3752 }, 3753 { 3754 .name = "DC off", 3755 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, 3756 .ops = &gen9_dc_off_power_well_ops, 3757 .id = SKL_DISP_DC_OFF, 3758 }, 3759 { 3760 .name = "power well 2", 3761 .domains = ICL_PW_2_POWER_DOMAINS, 3762 .ops = &hsw_power_well_ops, 3763 .id = SKL_DISP_PW_2, 3764 { 3765 .hsw.regs = &hsw_power_well_regs, 3766 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3767 .hsw.has_fuses = true, 3768 }, 3769 }, 3770 { 3771 .name = "power well 3", 3772 .domains = ICL_PW_3_POWER_DOMAINS, 3773 .ops = &hsw_power_well_ops, 3774 .id = ICL_DISP_PW_3, 3775 { 3776 .hsw.regs = &hsw_power_well_regs, 3777 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3778 .hsw.irq_pipe_mask = BIT(PIPE_B), 3779 .hsw.has_vga = true, 3780 .hsw.has_fuses = true, 3781 }, 3782 }, 3783 { 3784 .name = "DDI A IO", 3785 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3786 .ops = &hsw_power_well_ops, 3787 .id = DISP_PW_ID_NONE, 3788 { 3789 .hsw.regs = &icl_ddi_power_well_regs, 3790 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3791 }, 3792 }, 3793 { 3794 .name = "DDI B IO", 3795 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3796 .ops = &hsw_power_well_ops, 3797 .id = DISP_PW_ID_NONE, 3798 { 3799 .hsw.regs = &icl_ddi_power_well_regs, 3800 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3801 }, 3802 }, 3803 { 3804 .name = "DDI C IO", 3805 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3806 .ops = &hsw_power_well_ops, 3807 .id = DISP_PW_ID_NONE, 3808 { 3809 .hsw.regs = &icl_ddi_power_well_regs, 3810 .hsw.idx 
= ICL_PW_CTL_IDX_DDI_C, 3811 }, 3812 }, 3813 { 3814 .name = "DDI D IO", 3815 .domains = ICL_DDI_IO_D_POWER_DOMAINS, 3816 .ops = &hsw_power_well_ops, 3817 .id = DISP_PW_ID_NONE, 3818 { 3819 .hsw.regs = &icl_ddi_power_well_regs, 3820 .hsw.idx = ICL_PW_CTL_IDX_DDI_D, 3821 }, 3822 }, 3823 { 3824 .name = "DDI E IO", 3825 .domains = ICL_DDI_IO_E_POWER_DOMAINS, 3826 .ops = &hsw_power_well_ops, 3827 .id = DISP_PW_ID_NONE, 3828 { 3829 .hsw.regs = &icl_ddi_power_well_regs, 3830 .hsw.idx = ICL_PW_CTL_IDX_DDI_E, 3831 }, 3832 }, 3833 { 3834 .name = "DDI F IO", 3835 .domains = ICL_DDI_IO_F_POWER_DOMAINS, 3836 .ops = &hsw_power_well_ops, 3837 .id = DISP_PW_ID_NONE, 3838 { 3839 .hsw.regs = &icl_ddi_power_well_regs, 3840 .hsw.idx = ICL_PW_CTL_IDX_DDI_F, 3841 }, 3842 }, 3843 { 3844 .name = "AUX A", 3845 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 3846 .ops = &icl_aux_power_well_ops, 3847 .id = DISP_PW_ID_NONE, 3848 { 3849 .hsw.regs = &icl_aux_power_well_regs, 3850 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3851 }, 3852 }, 3853 { 3854 .name = "AUX B", 3855 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 3856 .ops = &icl_aux_power_well_ops, 3857 .id = DISP_PW_ID_NONE, 3858 { 3859 .hsw.regs = &icl_aux_power_well_regs, 3860 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3861 }, 3862 }, 3863 { 3864 .name = "AUX C TC1", 3865 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, 3866 .ops = &icl_aux_power_well_ops, 3867 .id = DISP_PW_ID_NONE, 3868 { 3869 .hsw.regs = &icl_aux_power_well_regs, 3870 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3871 .hsw.is_tc_tbt = false, 3872 }, 3873 }, 3874 { 3875 .name = "AUX D TC2", 3876 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, 3877 .ops = &icl_aux_power_well_ops, 3878 .id = DISP_PW_ID_NONE, 3879 { 3880 .hsw.regs = &icl_aux_power_well_regs, 3881 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 3882 .hsw.is_tc_tbt = false, 3883 }, 3884 }, 3885 { 3886 .name = "AUX E TC3", 3887 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, 3888 .ops = &icl_aux_power_well_ops, 3889 .id = DISP_PW_ID_NONE, 3890 { 3891 .hsw.regs = &icl_aux_power_well_regs, 3892 .hsw.idx = ICL_PW_CTL_IDX_AUX_E, 3893 .hsw.is_tc_tbt = false, 3894 }, 3895 }, 3896 { 3897 .name = "AUX F TC4", 3898 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, 3899 .ops = &icl_aux_power_well_ops, 3900 .id = DISP_PW_ID_NONE, 3901 { 3902 .hsw.regs = &icl_aux_power_well_regs, 3903 .hsw.idx = ICL_PW_CTL_IDX_AUX_F, 3904 .hsw.is_tc_tbt = false, 3905 }, 3906 }, 3907 { 3908 .name = "AUX C TBT1", 3909 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, 3910 .ops = &icl_aux_power_well_ops, 3911 .id = DISP_PW_ID_NONE, 3912 { 3913 .hsw.regs = &icl_aux_power_well_regs, 3914 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, 3915 .hsw.is_tc_tbt = true, 3916 }, 3917 }, 3918 { 3919 .name = "AUX D TBT2", 3920 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, 3921 .ops = &icl_aux_power_well_ops, 3922 .id = DISP_PW_ID_NONE, 3923 { 3924 .hsw.regs = &icl_aux_power_well_regs, 3925 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, 3926 .hsw.is_tc_tbt = true, 3927 }, 3928 }, 3929 { 3930 .name = "AUX E TBT3", 3931 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, 3932 .ops = &icl_aux_power_well_ops, 3933 .id = DISP_PW_ID_NONE, 3934 { 3935 .hsw.regs = &icl_aux_power_well_regs, 3936 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, 3937 .hsw.is_tc_tbt = true, 3938 }, 3939 }, 3940 { 3941 .name = "AUX F TBT4", 3942 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, 3943 .ops = &icl_aux_power_well_ops, 3944 .id = DISP_PW_ID_NONE, 3945 { 3946 .hsw.regs = &icl_aux_power_well_regs, 3947 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, 3948 .hsw.is_tc_tbt = true, 3949 }, 3950 }, 3951 { 3952 .name = "power well 4", 3953 .domains = 
ICL_PW_4_POWER_DOMAINS, 3954 .ops = &hsw_power_well_ops, 3955 .id = DISP_PW_ID_NONE, 3956 { 3957 .hsw.regs = &hsw_power_well_regs, 3958 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 3959 .hsw.has_fuses = true, 3960 .hsw.irq_pipe_mask = BIT(PIPE_C), 3961 }, 3962 }, 3963 }; 3964 3965 static void 3966 tgl_tc_cold_request(struct drm_i915_private *i915, bool block) 3967 { 3968 u8 tries = 0; 3969 int ret; 3970 3971 while (1) { 3972 u32 low_val; 3973 u32 high_val = 0; 3974 3975 if (block) 3976 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; 3977 else 3978 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; 3979 3980 /* 3981 * The spec states that we should time out the request after 200us, 3982 * but the function below will time out after 500us 3983 */ 3984 ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, 3985 &high_val); 3986 if (ret == 0) { 3987 if (block && 3988 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) 3989 ret = -EIO; 3990 else 3991 break; 3992 } 3993 3994 if (++tries == 3) 3995 break; 3996 3997 msleep(1); 3998 } 3999 4000 if (ret) 4001 drm_err(&i915->drm, "TC cold %sblock failed\n", 4002 block ? "" : "un"); 4003 else 4004 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", 4005 block ? "" : "un"); 4006 } 4007 4008 static void 4009 tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, 4010 struct i915_power_well *power_well) 4011 { 4012 tgl_tc_cold_request(i915, true); 4013 } 4014 4015 static void 4016 tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, 4017 struct i915_power_well *power_well) 4018 { 4019 tgl_tc_cold_request(i915, false); 4020 } 4021 4022 static void 4023 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, 4024 struct i915_power_well *power_well) 4025 { 4026 if (power_well->count > 0) 4027 tgl_tc_cold_off_power_well_enable(i915, power_well); 4028 else 4029 tgl_tc_cold_off_power_well_disable(i915, power_well); 4030 } 4031 4032 static bool 4033 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, 4034 struct i915_power_well *power_well) 4035 { 4036 /* 4037 * Not the correct implementation, but there is no way to just read it 4038 * back from PCODE, so return the request count to avoid state mismatch errors 4039 */ 4040 return power_well->count; 4041 } 4042 4043 static const struct i915_power_well_ops tgl_tc_cold_off_ops = { 4044 .sync_hw = tgl_tc_cold_off_power_well_sync_hw, 4045 .enable = tgl_tc_cold_off_power_well_enable, 4046 .disable = tgl_tc_cold_off_power_well_disable, 4047 .is_enabled = tgl_tc_cold_off_power_well_is_enabled, 4048 };
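/*
 * Hedged usage sketch (the caller shown here is an assumption, modelled
 * on the TC PHY code): paths that must keep the Type-C ports out of
 * TC-cold are expected to pin this virtual power well through the
 * regular display power domain API rather than by invoking the ops
 * above directly, e.g.:
 *
 *	intel_wakeref_t wakeref =
 *		intel_display_power_get(i915, POWER_DOMAIN_TC_COLD_OFF);
 *	... TC PHY / FIA registers can be accessed safely here ...
 *	intel_display_power_put(i915, POWER_DOMAIN_TC_COLD_OFF, wakeref);
 */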
3", 4090 .domains = TGL_PW_3_POWER_DOMAINS, 4091 .ops = &hsw_power_well_ops, 4092 .id = ICL_DISP_PW_3, 4093 { 4094 .hsw.regs = &hsw_power_well_regs, 4095 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4096 .hsw.irq_pipe_mask = BIT(PIPE_B), 4097 .hsw.has_vga = true, 4098 .hsw.has_fuses = true, 4099 }, 4100 }, 4101 { 4102 .name = "DDI A IO", 4103 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4104 .ops = &hsw_power_well_ops, 4105 .id = DISP_PW_ID_NONE, 4106 { 4107 .hsw.regs = &icl_ddi_power_well_regs, 4108 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4109 } 4110 }, 4111 { 4112 .name = "DDI B IO", 4113 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4114 .ops = &hsw_power_well_ops, 4115 .id = DISP_PW_ID_NONE, 4116 { 4117 .hsw.regs = &icl_ddi_power_well_regs, 4118 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4119 } 4120 }, 4121 { 4122 .name = "DDI C IO", 4123 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 4124 .ops = &hsw_power_well_ops, 4125 .id = DISP_PW_ID_NONE, 4126 { 4127 .hsw.regs = &icl_ddi_power_well_regs, 4128 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 4129 } 4130 }, 4131 { 4132 .name = "DDI IO TC1", 4133 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4134 .ops = &hsw_power_well_ops, 4135 .id = DISP_PW_ID_NONE, 4136 { 4137 .hsw.regs = &icl_ddi_power_well_regs, 4138 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4139 }, 4140 }, 4141 { 4142 .name = "DDI IO TC2", 4143 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4144 .ops = &hsw_power_well_ops, 4145 .id = DISP_PW_ID_NONE, 4146 { 4147 .hsw.regs = &icl_ddi_power_well_regs, 4148 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4149 }, 4150 }, 4151 { 4152 .name = "DDI IO TC3", 4153 .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, 4154 .ops = &hsw_power_well_ops, 4155 .id = DISP_PW_ID_NONE, 4156 { 4157 .hsw.regs = &icl_ddi_power_well_regs, 4158 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 4159 }, 4160 }, 4161 { 4162 .name = "DDI IO TC4", 4163 .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, 4164 .ops = &hsw_power_well_ops, 4165 .id = DISP_PW_ID_NONE, 4166 { 4167 .hsw.regs = &icl_ddi_power_well_regs, 4168 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 4169 }, 4170 }, 4171 { 4172 .name = "DDI IO TC5", 4173 .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, 4174 .ops = &hsw_power_well_ops, 4175 .id = DISP_PW_ID_NONE, 4176 { 4177 .hsw.regs = &icl_ddi_power_well_regs, 4178 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, 4179 }, 4180 }, 4181 { 4182 .name = "DDI IO TC6", 4183 .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, 4184 .ops = &hsw_power_well_ops, 4185 .id = DISP_PW_ID_NONE, 4186 { 4187 .hsw.regs = &icl_ddi_power_well_regs, 4188 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, 4189 }, 4190 }, 4191 { 4192 .name = "TC cold off", 4193 .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, 4194 .ops = &tgl_tc_cold_off_ops, 4195 .id = TGL_DISP_PW_TC_COLD_OFF, 4196 }, 4197 { 4198 .name = "AUX A", 4199 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 4200 .ops = &icl_aux_power_well_ops, 4201 .id = DISP_PW_ID_NONE, 4202 { 4203 .hsw.regs = &icl_aux_power_well_regs, 4204 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4205 }, 4206 }, 4207 { 4208 .name = "AUX B", 4209 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 4210 .ops = &icl_aux_power_well_ops, 4211 .id = DISP_PW_ID_NONE, 4212 { 4213 .hsw.regs = &icl_aux_power_well_regs, 4214 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4215 }, 4216 }, 4217 { 4218 .name = "AUX C", 4219 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 4220 .ops = &icl_aux_power_well_ops, 4221 .id = DISP_PW_ID_NONE, 4222 { 4223 .hsw.regs = &icl_aux_power_well_regs, 4224 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 4225 }, 4226 }, 4227 { 4228 .name = "AUX USBC1", 4229 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4230 .ops = &icl_aux_power_well_ops, 4231 .id = DISP_PW_ID_NONE, 4232 { 4233 .hsw.regs = 
&icl_aux_power_well_regs, 4234 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4235 .hsw.is_tc_tbt = false, 4236 }, 4237 }, 4238 { 4239 .name = "AUX USBC2", 4240 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4241 .ops = &icl_aux_power_well_ops, 4242 .id = DISP_PW_ID_NONE, 4243 { 4244 .hsw.regs = &icl_aux_power_well_regs, 4245 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4246 .hsw.is_tc_tbt = false, 4247 }, 4248 }, 4249 { 4250 .name = "AUX USBC3", 4251 .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS, 4252 .ops = &icl_aux_power_well_ops, 4253 .id = DISP_PW_ID_NONE, 4254 { 4255 .hsw.regs = &icl_aux_power_well_regs, 4256 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 4257 .hsw.is_tc_tbt = false, 4258 }, 4259 }, 4260 { 4261 .name = "AUX USBC4", 4262 .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS, 4263 .ops = &icl_aux_power_well_ops, 4264 .id = DISP_PW_ID_NONE, 4265 { 4266 .hsw.regs = &icl_aux_power_well_regs, 4267 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 4268 .hsw.is_tc_tbt = false, 4269 }, 4270 }, 4271 { 4272 .name = "AUX USBC5", 4273 .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS, 4274 .ops = &icl_aux_power_well_ops, 4275 .id = DISP_PW_ID_NONE, 4276 { 4277 .hsw.regs = &icl_aux_power_well_regs, 4278 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, 4279 .hsw.is_tc_tbt = false, 4280 }, 4281 }, 4282 { 4283 .name = "AUX USBC6", 4284 .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS, 4285 .ops = &icl_aux_power_well_ops, 4286 .id = DISP_PW_ID_NONE, 4287 { 4288 .hsw.regs = &icl_aux_power_well_regs, 4289 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, 4290 .hsw.is_tc_tbt = false, 4291 }, 4292 }, 4293 { 4294 .name = "AUX TBT1", 4295 .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS, 4296 .ops = &icl_aux_power_well_ops, 4297 .id = DISP_PW_ID_NONE, 4298 { 4299 .hsw.regs = &icl_aux_power_well_regs, 4300 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 4301 .hsw.is_tc_tbt = true, 4302 }, 4303 }, 4304 { 4305 .name = "AUX TBT2", 4306 .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS, 4307 .ops = &icl_aux_power_well_ops, 4308 .id = DISP_PW_ID_NONE, 4309 { 4310 .hsw.regs = &icl_aux_power_well_regs, 4311 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 4312 .hsw.is_tc_tbt = true, 4313 }, 4314 }, 4315 { 4316 .name = "AUX TBT3", 4317 .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS, 4318 .ops = &icl_aux_power_well_ops, 4319 .id = DISP_PW_ID_NONE, 4320 { 4321 .hsw.regs = &icl_aux_power_well_regs, 4322 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 4323 .hsw.is_tc_tbt = true, 4324 }, 4325 }, 4326 { 4327 .name = "AUX TBT4", 4328 .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS, 4329 .ops = &icl_aux_power_well_ops, 4330 .id = DISP_PW_ID_NONE, 4331 { 4332 .hsw.regs = &icl_aux_power_well_regs, 4333 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 4334 .hsw.is_tc_tbt = true, 4335 }, 4336 }, 4337 { 4338 .name = "AUX TBT5", 4339 .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS, 4340 .ops = &icl_aux_power_well_ops, 4341 .id = DISP_PW_ID_NONE, 4342 { 4343 .hsw.regs = &icl_aux_power_well_regs, 4344 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, 4345 .hsw.is_tc_tbt = true, 4346 }, 4347 }, 4348 { 4349 .name = "AUX TBT6", 4350 .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS, 4351 .ops = &icl_aux_power_well_ops, 4352 .id = DISP_PW_ID_NONE, 4353 { 4354 .hsw.regs = &icl_aux_power_well_regs, 4355 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, 4356 .hsw.is_tc_tbt = true, 4357 }, 4358 }, 4359 { 4360 .name = "power well 4", 4361 .domains = TGL_PW_4_POWER_DOMAINS, 4362 .ops = &hsw_power_well_ops, 4363 .id = DISP_PW_ID_NONE, 4364 { 4365 .hsw.regs = &hsw_power_well_regs, 4366 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4367 .hsw.has_fuses = true, 4368 .hsw.irq_pipe_mask = BIT(PIPE_C), 4369 } 4370 }, 4371 { 4372 .name = "power well 5", 4373 .domains = 
TGL_PW_5_POWER_DOMAINS, 4374 .ops = &hsw_power_well_ops, 4375 .id = DISP_PW_ID_NONE, 4376 { 4377 .hsw.regs = &hsw_power_well_regs, 4378 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4379 .hsw.has_fuses = true, 4380 .hsw.irq_pipe_mask = BIT(PIPE_D), 4381 }, 4382 }, 4383 }; 4384 4385 static const struct i915_power_well_desc rkl_power_wells[] = { 4386 { 4387 .name = "always-on", 4388 .always_on = true, 4389 .domains = POWER_DOMAIN_MASK, 4390 .ops = &i9xx_always_on_power_well_ops, 4391 .id = DISP_PW_ID_NONE, 4392 }, 4393 { 4394 .name = "power well 1", 4395 /* Handled by the DMC firmware */ 4396 .always_on = true, 4397 .domains = 0, 4398 .ops = &hsw_power_well_ops, 4399 .id = SKL_DISP_PW_1, 4400 { 4401 .hsw.regs = &hsw_power_well_regs, 4402 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4403 .hsw.has_fuses = true, 4404 }, 4405 }, 4406 { 4407 .name = "DC off", 4408 .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS, 4409 .ops = &gen9_dc_off_power_well_ops, 4410 .id = SKL_DISP_DC_OFF, 4411 }, 4412 { 4413 .name = "power well 3", 4414 .domains = RKL_PW_3_POWER_DOMAINS, 4415 .ops = &hsw_power_well_ops, 4416 .id = ICL_DISP_PW_3, 4417 { 4418 .hsw.regs = &hsw_power_well_regs, 4419 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4420 .hsw.irq_pipe_mask = BIT(PIPE_B), 4421 .hsw.has_vga = true, 4422 .hsw.has_fuses = true, 4423 }, 4424 }, 4425 { 4426 .name = "power well 4", 4427 .domains = RKL_PW_4_POWER_DOMAINS, 4428 .ops = &hsw_power_well_ops, 4429 .id = DISP_PW_ID_NONE, 4430 { 4431 .hsw.regs = &hsw_power_well_regs, 4432 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4433 .hsw.has_fuses = true, 4434 .hsw.irq_pipe_mask = BIT(PIPE_C), 4435 } 4436 }, 4437 { 4438 .name = "DDI A IO", 4439 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4440 .ops = &hsw_power_well_ops, 4441 .id = DISP_PW_ID_NONE, 4442 { 4443 .hsw.regs = &icl_ddi_power_well_regs, 4444 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4445 } 4446 }, 4447 { 4448 .name = "DDI B IO", 4449 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4450 .ops = &hsw_power_well_ops, 4451 .id = DISP_PW_ID_NONE, 4452 { 4453 .hsw.regs = &icl_ddi_power_well_regs, 4454 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4455 } 4456 }, 4457 { 4458 .name = "DDI IO TC1", 4459 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4460 .ops = &hsw_power_well_ops, 4461 .id = DISP_PW_ID_NONE, 4462 { 4463 .hsw.regs = &icl_ddi_power_well_regs, 4464 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4465 }, 4466 }, 4467 { 4468 .name = "DDI IO TC2", 4469 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4470 .ops = &hsw_power_well_ops, 4471 .id = DISP_PW_ID_NONE, 4472 { 4473 .hsw.regs = &icl_ddi_power_well_regs, 4474 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4475 }, 4476 }, 4477 { 4478 .name = "AUX A", 4479 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 4480 .ops = &icl_aux_power_well_ops, 4481 .id = DISP_PW_ID_NONE, 4482 { 4483 .hsw.regs = &icl_aux_power_well_regs, 4484 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4485 }, 4486 }, 4487 { 4488 .name = "AUX B", 4489 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 4490 .ops = &icl_aux_power_well_ops, 4491 .id = DISP_PW_ID_NONE, 4492 { 4493 .hsw.regs = &icl_aux_power_well_regs, 4494 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4495 }, 4496 }, 4497 { 4498 .name = "AUX USBC1", 4499 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4500 .ops = &icl_aux_power_well_ops, 4501 .id = DISP_PW_ID_NONE, 4502 { 4503 .hsw.regs = &icl_aux_power_well_regs, 4504 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4505 }, 4506 }, 4507 { 4508 .name = "AUX USBC2", 4509 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4510 .ops = &icl_aux_power_well_ops, 4511 .id = DISP_PW_ID_NONE, 4512 { 4513 .hsw.regs = &icl_aux_power_well_regs, 4514 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4515 }, 4516 
}, 4517 }; 4518 4519 static int 4520 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 4521 int disable_power_well) 4522 { 4523 if (disable_power_well >= 0) 4524 return !!disable_power_well; 4525 4526 return 1; 4527 } 4528 4529 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, 4530 int enable_dc) 4531 { 4532 u32 mask; 4533 int requested_dc; 4534 int max_dc; 4535 4536 if (IS_DG1(dev_priv)) 4537 max_dc = 3; 4538 else if (DISPLAY_VER(dev_priv) >= 12) 4539 max_dc = 4; 4540 else if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv) || IS_GEN9_BC(dev_priv)) 4541 max_dc = 2; 4542 else if (IS_GEN9_LP(dev_priv)) 4543 max_dc = 1; 4544 else 4545 max_dc = 0; 4546 4547 /* 4548 * DC9 has a separate HW flow from the rest of the DC states, 4549 * not depending on the DMC firmware. It's needed by system 4550 * suspend/resume, so allow it unconditionally. 4551 */ 4552 mask = IS_GEN9_LP(dev_priv) || DISPLAY_VER(dev_priv) >= 11 ? 4553 DC_STATE_EN_DC9 : 0; 4554 4555 if (!dev_priv->params.disable_power_well) 4556 max_dc = 0; 4557 4558 if (enable_dc >= 0 && enable_dc <= max_dc) { 4559 requested_dc = enable_dc; 4560 } else if (enable_dc == -1) { 4561 requested_dc = max_dc; 4562 } else if (enable_dc > max_dc && enable_dc <= 4) { 4563 drm_dbg_kms(&dev_priv->drm, 4564 "Adjusting requested max DC state (%d->%d)\n", 4565 enable_dc, max_dc); 4566 requested_dc = max_dc; 4567 } else { 4568 drm_err(&dev_priv->drm, 4569 "Unexpected value for enable_dc (%d)\n", enable_dc); 4570 requested_dc = max_dc; 4571 } 4572 4573 switch (requested_dc) { 4574 case 4: 4575 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; 4576 break; 4577 case 3: 4578 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; 4579 break; 4580 case 2: 4581 mask |= DC_STATE_EN_UPTO_DC6; 4582 break; 4583 case 1: 4584 mask |= DC_STATE_EN_UPTO_DC5; 4585 break; 4586 } 4587 4588 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask); 4589 4590 return mask; 4591 }
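/*
 * Hedged worked example for get_allowed_dc_mask() (the platform and the
 * module parameters are assumptions): on a display version 12 platform
 * with enable_dc = -1 and power well support left enabled, max_dc is 4,
 * DC9 is allowed, requested_dc becomes 4, and the function returns
 * DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6.
 */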
4592 4593 static int 4594 __set_power_wells(struct i915_power_domains *power_domains, 4595 const struct i915_power_well_desc *power_well_descs, 4596 int power_well_descs_sz, u64 skip_mask) 4597 { 4598 struct drm_i915_private *i915 = container_of(power_domains, 4599 struct drm_i915_private, 4600 power_domains); 4601 u64 power_well_ids = 0; 4602 int power_well_count = 0; 4603 int i, plt_idx = 0; 4604 4605 for (i = 0; i < power_well_descs_sz; i++) 4606 if (!(BIT_ULL(power_well_descs[i].id) & skip_mask)) 4607 power_well_count++; 4608 4609 power_domains->power_well_count = power_well_count; 4610 power_domains->power_wells = 4611 kcalloc(power_well_count, 4612 sizeof(*power_domains->power_wells), 4613 GFP_KERNEL); 4614 if (!power_domains->power_wells) 4615 return -ENOMEM; 4616 4617 for (i = 0; i < power_well_descs_sz; i++) { 4618 enum i915_power_well_id id = power_well_descs[i].id; 4619 4620 if (BIT_ULL(id) & skip_mask) 4621 continue; 4622 4623 power_domains->power_wells[plt_idx++].desc = 4624 &power_well_descs[i]; 4625 4626 if (id == DISP_PW_ID_NONE) 4627 continue; 4628 4629 drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8); 4630 drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id)); 4631 power_well_ids |= BIT_ULL(id); 4632 } 4633 4634 return 0; 4635 } 4636 4637 #define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \ 4638 __set_power_wells(power_domains, __power_well_descs, \ 4639 ARRAY_SIZE(__power_well_descs), skip_mask) 4640 4641 #define set_power_wells(power_domains, __power_well_descs) \ 4642 set_power_wells_mask(power_domains, __power_well_descs, 0) 4643 4644 /** 4645 * intel_power_domains_init - initializes the power domain structures 4646 * @dev_priv: i915 device instance 4647 * 4648 * Initializes the power domain structures for @dev_priv depending upon the 4649 * supported platform. 4650 */ 4651 int intel_power_domains_init(struct drm_i915_private *dev_priv) 4652 { 4653 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4654 int err; 4655 4656 dev_priv->params.disable_power_well = 4657 sanitize_disable_power_well_option(dev_priv, 4658 dev_priv->params.disable_power_well); 4659 dev_priv->csr.allowed_dc_mask = 4660 get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); 4661 4662 dev_priv->csr.target_dc_state = 4663 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 4664 4665 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); 4666 4667 mutex_init(&power_domains->lock); 4668 4669 INIT_DELAYED_WORK(&power_domains->async_put_work, 4670 intel_display_power_put_async_work); 4671 4672 /* 4673 * The enabling order will be from lower to higher indexed wells, 4674 * the disabling order is reversed. 4675 */ 4676 if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) { 4677 err = set_power_wells_mask(power_domains, tgl_power_wells, 4678 BIT_ULL(TGL_DISP_PW_TC_COLD_OFF)); 4679 } else if (IS_ROCKETLAKE(dev_priv)) { 4680 err = set_power_wells(power_domains, rkl_power_wells); 4681 } else if (IS_DISPLAY_VER(dev_priv, 12)) { 4682 err = set_power_wells(power_domains, tgl_power_wells); 4683 } else if (IS_DISPLAY_VER(dev_priv, 11)) { 4684 err = set_power_wells(power_domains, icl_power_wells); 4685 } else if (IS_CNL_WITH_PORT_F(dev_priv)) { 4686 err = set_power_wells(power_domains, cnl_power_wells); 4687 } else if (IS_CANNONLAKE(dev_priv)) { 4688 err = set_power_wells_mask(power_domains, cnl_power_wells, 4689 BIT_ULL(CNL_DISP_PW_DDI_F_IO) | 4690 BIT_ULL(CNL_DISP_PW_DDI_F_AUX)); 4691 } else if (IS_GEMINILAKE(dev_priv)) { 4692 err = set_power_wells(power_domains, glk_power_wells); 4693 } else if (IS_BROXTON(dev_priv)) { 4694 err = set_power_wells(power_domains, bxt_power_wells); 4695 } else if (IS_GEN9_BC(dev_priv)) { 4696 err = set_power_wells(power_domains, skl_power_wells); 4697 } else if (IS_CHERRYVIEW(dev_priv)) { 4698 err = set_power_wells(power_domains, chv_power_wells); 4699 } else if (IS_BROADWELL(dev_priv)) { 4700 err = set_power_wells(power_domains, bdw_power_wells); 4701 } else if (IS_HASWELL(dev_priv)) { 4702 err = set_power_wells(power_domains, hsw_power_wells); 4703 } else if (IS_VALLEYVIEW(dev_priv)) { 4704 err = set_power_wells(power_domains, vlv_power_wells); 4705 } else if (IS_I830(dev_priv)) { 4706 err = set_power_wells(power_domains, i830_power_wells); 4707 } else { 4708 err = set_power_wells(power_domains, i9xx_always_on_power_well); 4709 } 4710 4711 return err; 4712 } 4713 4714 /** 4715 * intel_power_domains_cleanup - clean up power domains resources 4716 * @dev_priv: i915 device instance 4717 * 4718 * Release any resources acquired by intel_power_domains_init() 4719 */ 4720 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) 4721 { 4722 kfree(dev_priv->power_domains.power_wells); 4723 } 4724 4725 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) 4726 { 4727 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4728 struct i915_power_well *power_well; 4729 4730 mutex_lock(&power_domains->lock); 4731 for_each_power_well(dev_priv, power_well) { 4732 power_well->desc->ops->sync_hw(dev_priv, power_well); 4733
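/*
 * Cache the current HW state: the lockless
 * __intel_display_power_is_enabled() checks this shadow flag
 * instead of reading the hardware.
 */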
power_well->hw_enabled = 4734 power_well->desc->ops->is_enabled(dev_priv, power_well); 4735 } 4736 mutex_unlock(&power_domains->lock); 4737 } 4738 4739 static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv, 4740 enum dbuf_slice slice, bool enable) 4741 { 4742 i915_reg_t reg = DBUF_CTL_S(slice); 4743 bool state; 4744 u32 val; 4745 4746 val = intel_de_read(dev_priv, reg); 4747 if (enable) 4748 val |= DBUF_POWER_REQUEST; 4749 else 4750 val &= ~DBUF_POWER_REQUEST; 4751 intel_de_write(dev_priv, reg, val); 4752 intel_de_posting_read(dev_priv, reg); 4753 udelay(10); 4754 4755 state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE; 4756 drm_WARN(&dev_priv->drm, enable != state, 4757 "DBuf slice %d power %s timeout!\n", 4758 slice, enable ? "enable" : "disable"); 4759 } 4760 4761 void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, 4762 u8 req_slices) 4763 { 4764 int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices; 4765 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4766 enum dbuf_slice slice; 4767 4768 drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1), 4769 "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n", 4770 req_slices, num_slices); 4771 4772 drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n", 4773 req_slices); 4774 4775 /* 4776 * This might run in parallel with gen9_dc_off_power_well_enable() 4777 * being called from intel_dp_detect(), for instance, 4778 * which can trigger an assertion failure via a race: 4779 * gen9_assert_dbuf_enabled() might preempt this when the registers 4780 * were already updated, while dev_priv was not. 4781 */ 4782 mutex_lock(&power_domains->lock); 4783 4784 for (slice = DBUF_S1; slice < num_slices; slice++) 4785 gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice)); 4786 4787 dev_priv->dbuf.enabled_slices = req_slices; 4788 4789 mutex_unlock(&power_domains->lock); 4790 } 4791 4792 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) 4793 { 4794 dev_priv->dbuf.enabled_slices = 4795 intel_enabled_dbuf_slices_mask(dev_priv); 4796 4797 /* 4798 * Just power up at least 1 slice; we will 4799 * figure out later which slices we have and what we need. 4800 */ 4801 gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | 4802 dev_priv->dbuf.enabled_slices); 4803 } 4804 4805 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) 4806 { 4807 gen9_dbuf_slices_update(dev_priv, 0); 4808 } 4809 4810 static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) 4811 { 4812 const int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices; 4813 enum dbuf_slice slice; 4814 4815 for (slice = DBUF_S1; slice < (DBUF_S1 + num_slices); slice++) 4816 intel_de_rmw(dev_priv, DBUF_CTL_S(slice), 4817 DBUF_TRACKER_STATE_SERVICE_MASK, 4818 DBUF_TRACKER_STATE_SERVICE(8)); 4819 } 4820 4821 static void icl_mbus_init(struct drm_i915_private *dev_priv) 4822 { 4823 unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask; 4824 u32 mask, val, i; 4825 4826 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | 4827 MBUS_ABOX_BT_CREDIT_POOL2_MASK | 4828 MBUS_ABOX_B_CREDIT_MASK | 4829 MBUS_ABOX_BW_CREDIT_MASK; 4830 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 4831 MBUS_ABOX_BT_CREDIT_POOL2(16) | 4832 MBUS_ABOX_B_CREDIT(1) | 4833 MBUS_ABOX_BW_CREDIT(1); 4834 4835 /* 4836 * gen12 platforms that use abox1 and abox2 for pixel data reads still 4837 * expect us to program the abox_ctl0 register as well, even though 4838 * we don't have to program other instance-0 registers like BW_BUDDY.
4839 */ 4840 if (IS_DISPLAY_VER(dev_priv, 12)) 4841 abox_regs |= BIT(0); 4842 4843 for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) 4844 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val); 4845 } 4846 4847 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 4848 { 4849 u32 val = intel_de_read(dev_priv, LCPLL_CTL); 4850 4851 /* 4852 * The LCPLL register should be turned on by the BIOS. For now 4853 * let's just check its state and print errors in case 4854 * something is wrong. Don't even try to turn it on. 4855 */ 4856 4857 if (val & LCPLL_CD_SOURCE_FCLK) 4858 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); 4859 4860 if (val & LCPLL_PLL_DISABLE) 4861 drm_err(&dev_priv->drm, "LCPLL is disabled\n"); 4862 4863 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 4864 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); 4865 } 4866 4867 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 4868 { 4869 struct drm_device *dev = &dev_priv->drm; 4870 struct intel_crtc *crtc; 4871 4872 for_each_intel_crtc(dev, crtc) 4873 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 4874 pipe_name(crtc->pipe)); 4875 4876 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), 4877 "Display power well on\n"); 4878 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, 4879 "SPLL enabled\n"); 4880 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 4881 "WRPLL1 enabled\n"); 4882 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 4883 "WRPLL2 enabled\n"); 4884 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, 4885 "Panel power on\n"); 4886 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 4887 "CPU PWM1 enabled\n"); 4888 if (IS_HASWELL(dev_priv)) 4889 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 4890 "CPU PWM2 enabled\n"); 4891 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 4892 "PCH PWM1 enabled\n"); 4893 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 4894 "Utility pin enabled\n"); 4895 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, 4896 "PCH GTC enabled\n"); 4897 4898 /* 4899 * In theory we can still leave IRQs enabled, as long as only the HPD 4900 * interrupts remain enabled. We used to check for that, but since it's 4901 * gen-specific and since we only disable LCPLL after we fully disable 4902 * the interrupts, the check below should be enough. 4903 */ 4904 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 4905 } 4906 4907 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 4908 { 4909 if (IS_HASWELL(dev_priv)) 4910 return intel_de_read(dev_priv, D_COMP_HSW); 4911 else 4912 return intel_de_read(dev_priv, D_COMP_BDW); 4913 } 4914 4915 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 4916 { 4917 if (IS_HASWELL(dev_priv)) { 4918 if (sandybridge_pcode_write(dev_priv, 4919 GEN6_PCODE_WRITE_D_COMP, val)) 4920 drm_dbg_kms(&dev_priv->drm, 4921 "Failed to write to D_COMP\n"); 4922 } else { 4923 intel_de_write(dev_priv, D_COMP_BDW, val); 4924 intel_de_posting_read(dev_priv, D_COMP_BDW); 4925 } 4926 } 4927 4928 /* 4929 * This function implements pieces of two sequences from BSpec: 4930 * - Sequence for display software to disable LCPLL 4931 * - Sequence for display software to allow package C8+ 4932 * The steps implemented here are just the steps that actually touch the LCPLL 4933 * register. 
Callers should take care of disabling all the display engine 4934 * functions, doing the mode unset, fixing interrupts, etc. 4935 */ 4936 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 4937 bool switch_to_fclk, bool allow_power_down) 4938 { 4939 u32 val; 4940 4941 assert_can_disable_lcpll(dev_priv); 4942 4943 val = intel_de_read(dev_priv, LCPLL_CTL); 4944 4945 if (switch_to_fclk) { 4946 val |= LCPLL_CD_SOURCE_FCLK; 4947 intel_de_write(dev_priv, LCPLL_CTL, val); 4948 4949 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & 4950 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 4951 drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); 4952 4953 val = intel_de_read(dev_priv, LCPLL_CTL); 4954 } 4955 4956 val |= LCPLL_PLL_DISABLE; 4957 intel_de_write(dev_priv, LCPLL_CTL, val); 4958 intel_de_posting_read(dev_priv, LCPLL_CTL); 4959 4960 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 4961 drm_err(&dev_priv->drm, "LCPLL still locked\n"); 4962 4963 val = hsw_read_dcomp(dev_priv); 4964 val |= D_COMP_COMP_DISABLE; 4965 hsw_write_dcomp(dev_priv, val); 4966 ndelay(100); 4967 4968 if (wait_for((hsw_read_dcomp(dev_priv) & 4969 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 4970 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); 4971 4972 if (allow_power_down) { 4973 val = intel_de_read(dev_priv, LCPLL_CTL); 4974 val |= LCPLL_POWER_DOWN_ALLOW; 4975 intel_de_write(dev_priv, LCPLL_CTL, val); 4976 intel_de_posting_read(dev_priv, LCPLL_CTL); 4977 } 4978 } 4979 4980 /* 4981 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 4982 * source. 4983 */ 4984 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 4985 { 4986 u32 val; 4987 4988 val = intel_de_read(dev_priv, LCPLL_CTL); 4989 4990 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 4991 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 4992 return; 4993 4994 /* 4995 * Make sure we're not in PC8 state before disabling PC8, otherwise 4996 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 4997 */ 4998 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 4999 5000 if (val & LCPLL_POWER_DOWN_ALLOW) { 5001 val &= ~LCPLL_POWER_DOWN_ALLOW; 5002 intel_de_write(dev_priv, LCPLL_CTL, val); 5003 intel_de_posting_read(dev_priv, LCPLL_CTL); 5004 } 5005 5006 val = hsw_read_dcomp(dev_priv); 5007 val |= D_COMP_COMP_FORCE; 5008 val &= ~D_COMP_COMP_DISABLE; 5009 hsw_write_dcomp(dev_priv, val); 5010 5011 val = intel_de_read(dev_priv, LCPLL_CTL); 5012 val &= ~LCPLL_PLL_DISABLE; 5013 intel_de_write(dev_priv, LCPLL_CTL, val); 5014 5015 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 5016 drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); 5017 5018 if (val & LCPLL_CD_SOURCE_FCLK) { 5019 val = intel_de_read(dev_priv, LCPLL_CTL); 5020 val &= ~LCPLL_CD_SOURCE_FCLK; 5021 intel_de_write(dev_priv, LCPLL_CTL, val); 5022 5023 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & 5024 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 5025 drm_err(&dev_priv->drm, 5026 "Switching back to LCPLL failed\n"); 5027 } 5028 5029 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 5030 5031 intel_update_cdclk(dev_priv); 5032 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); 5033 } 5034 5035 /* 5036 * Package states C8 and deeper are really deep PC states that can only be 5037 * reached when all the devices on the system allow it, so even if the graphics 5038 * device allows PC8+, it doesn't mean the system will actually get to these 5039 * states. Our driver only allows PC8+ when going into runtime PM. 5040 * 5041 * The requirements for PC8+ are that all the outputs are disabled, the power 5042 * well is disabled and most interrupts are disabled, and these are also 5043 * requirements for runtime PM. When these conditions are met, we manually do 5044 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 5045 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard 5046 * hang the machine. 5047 * 5048 * When we really reach PC8 or deeper states (not just when we allow it) we lose 5049 * the state of some registers, so when we come back from PC8+ we need to 5050 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 5051 * need to take care of the registers kept by RC6. Notice that this happens even 5052 * if we don't put the device in PCI D3 state (which is what currently happens 5053 * because of the runtime PM support). 5054 * 5055 * For more, read "Display Sequences for Package C8" on the hardware 5056 * documentation. 5057 */
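/*
 * Hedged sketch (the runtime PM hooks as the callers are an assumption
 * based on the comment above): entering and leaving package C8+ are
 * expected to pair the two helpers below:
 *
 *	hsw_enable_pc8(dev_priv);	<- on the way into runtime suspend
 *	... device idle, PC8+ may be entered by the platform ...
 *	hsw_disable_pc8(dev_priv);	<- on the way out of runtime suspend
 */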
5058 static void hsw_enable_pc8(struct drm_i915_private *dev_priv) 5059 { 5060 u32 val; 5061 5062 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n"); 5063 5064 if (HAS_PCH_LPT_LP(dev_priv)) { 5065 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 5066 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 5067 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 5068 } 5069 5070 lpt_disable_clkout_dp(dev_priv); 5071 hsw_disable_lcpll(dev_priv, true, true); 5072 } 5073 5074 static void hsw_disable_pc8(struct drm_i915_private *dev_priv) 5075 { 5076 u32 val; 5077 5078 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); 5079 5080 hsw_restore_lcpll(dev_priv); 5081 intel_init_pch_refclk(dev_priv); 5082 5083 if (HAS_PCH_LPT_LP(dev_priv)) { 5084 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 5085 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 5086 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 5087 } 5088 } 5089 5090 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, 5091 bool enable) 5092 { 5093 i915_reg_t reg; 5094 u32 reset_bits, val; 5095 5096 if (IS_IVYBRIDGE(dev_priv)) { 5097 reg = GEN7_MSG_CTL; 5098 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; 5099 } else { 5100 reg = HSW_NDE_RSTWRN_OPT; 5101 reset_bits = RESET_PCH_HANDSHAKE_ENABLE; 5102 } 5103 5104 val = intel_de_read(dev_priv, reg); 5105 5106 if (enable) 5107 val |= reset_bits; 5108 else 5109 val &= ~reset_bits; 5110 5111 intel_de_write(dev_priv, reg, val); 5112 } 5113 5114 static void skl_display_core_init(struct drm_i915_private *dev_priv, 5115 bool resume) 5116 { 5117 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5118 struct i915_power_well *well; 5119 5120 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5121 5122 /* enable PCH reset handshake */ 5123 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 5124 5125 /* enable PG1 and Misc I/O */ 5126 mutex_lock(&power_domains->lock); 5127 5128 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5129 intel_power_well_enable(dev_priv, well); 5130 5131 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); 5132 intel_power_well_enable(dev_priv, well); 5133 5134 mutex_unlock(&power_domains->lock); 5135 5136 intel_cdclk_init_hw(dev_priv); 5137 5138 gen9_dbuf_enable(dev_priv); 5139 5140 if (resume && dev_priv->csr.dmc_payload) 5141 intel_csr_load_program(dev_priv); 5142 } 5143 5144 static void skl_display_core_uninit(struct drm_i915_private *dev_priv) 5145 { 5146 struct
i915_power_domains *power_domains = &dev_priv->power_domains; 5147 struct i915_power_well *well; 5148 5149 gen9_disable_dc_states(dev_priv); 5150 5151 gen9_dbuf_disable(dev_priv); 5152 5153 intel_cdclk_uninit_hw(dev_priv); 5154 5155 /* The spec doesn't call for removing the reset handshake flag */ 5156 /* disable PG1 and Misc I/O */ 5157 5158 mutex_lock(&power_domains->lock); 5159 5160 /* 5161 * BSpec says to keep the MISC IO power well enabled here, only 5162 * remove our request for power well 1. 5163 * Note that even though the driver's request is removed power well 1 5164 * may stay enabled after this due to DMC's own request on it. 5165 */ 5166 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5167 intel_power_well_disable(dev_priv, well); 5168 5169 mutex_unlock(&power_domains->lock); 5170 5171 usleep_range(10, 30); /* 10 us delay per Bspec */ 5172 } 5173 5174 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume) 5175 { 5176 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5177 struct i915_power_well *well; 5178 5179 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5180 5181 /* 5182 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 5183 * or else the reset will hang because there is no PCH to respond. 5184 * Move the handshake programming to initialization sequence. 5185 * Previously was left up to BIOS. 5186 */ 5187 intel_pch_reset_handshake(dev_priv, false); 5188 5189 /* Enable PG1 */ 5190 mutex_lock(&power_domains->lock); 5191 5192 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5193 intel_power_well_enable(dev_priv, well); 5194 5195 mutex_unlock(&power_domains->lock); 5196 5197 intel_cdclk_init_hw(dev_priv); 5198 5199 gen9_dbuf_enable(dev_priv); 5200 5201 if (resume && dev_priv->csr.dmc_payload) 5202 intel_csr_load_program(dev_priv); 5203 } 5204 5205 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) 5206 { 5207 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5208 struct i915_power_well *well; 5209 5210 gen9_disable_dc_states(dev_priv); 5211 5212 gen9_dbuf_disable(dev_priv); 5213 5214 intel_cdclk_uninit_hw(dev_priv); 5215 5216 /* The spec doesn't call for removing the reset handshake flag */ 5217 5218 /* 5219 * Disable PW1 (PG1). 5220 * Note that even though the driver's request is removed power well 1 5221 * may stay enabled after this due to DMC's own request on it. 5222 */ 5223 mutex_lock(&power_domains->lock); 5224 5225 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5226 intel_power_well_disable(dev_priv, well); 5227 5228 mutex_unlock(&power_domains->lock); 5229 5230 usleep_range(10, 30); /* 10 us delay per Bspec */ 5231 } 5232 5233 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) 5234 { 5235 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5236 struct i915_power_well *well; 5237 5238 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5239 5240 /* 1. Enable PCH Reset Handshake */ 5241 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 5242 5243 /* 2-3. */ 5244 intel_combo_phy_init(dev_priv); 5245 5246 /* 5247 * 4. Enable Power Well 1 (PG1). 5248 * The AUX IO power wells will be enabled on demand. 5249 */ 5250 mutex_lock(&power_domains->lock); 5251 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5252 intel_power_well_enable(dev_priv, well); 5253 mutex_unlock(&power_domains->lock); 5254 5255 /* 5. Enable CD clock */ 5256 intel_cdclk_init_hw(dev_priv); 5257 5258 /* 6. 
Enable DBUF */ 5259 gen9_dbuf_enable(dev_priv); 5260 5261 if (resume && dev_priv->csr.dmc_payload) 5262 intel_csr_load_program(dev_priv); 5263 } 5264 5265 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) 5266 { 5267 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5268 struct i915_power_well *well; 5269 5270 gen9_disable_dc_states(dev_priv); 5271 5272 /* 1. Disable all display engine functions -> already done */ 5273 5274 /* 2. Disable DBUF */ 5275 gen9_dbuf_disable(dev_priv); 5276 5277 /* 3. Disable CD clock */ 5278 intel_cdclk_uninit_hw(dev_priv); 5279 5280 /* 5281 * 4. Disable Power Well 1 (PG1). 5282 * The AUX IO power wells are toggled on demand, so they are already 5283 * disabled at this point. 5284 */ 5285 mutex_lock(&power_domains->lock); 5286 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5287 intel_power_well_disable(dev_priv, well); 5288 mutex_unlock(&power_domains->lock); 5289 5290 usleep_range(10, 30); /* 10 us delay per Bspec */ 5291 5292 /* 5. */ 5293 intel_combo_phy_uninit(dev_priv); 5294 } 5295 5296 struct buddy_page_mask { 5297 u32 page_mask; 5298 u8 type; 5299 u8 num_channels; 5300 }; 5301 5302 static const struct buddy_page_mask tgl_buddy_page_masks[] = { 5303 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF }, 5304 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF }, 5305 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C }, 5306 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C }, 5307 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F }, 5308 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E }, 5309 { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 }, 5310 { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 }, 5311 {} 5312 }; 5313 5314 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = { 5315 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 }, 5316 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 }, 5317 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 }, 5318 { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 }, 5319 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 }, 5320 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 }, 5321 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 }, 5322 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 }, 5323 {} 5324 };
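/*
 * Hedged worked example (the memory configuration is an assumption): a
 * system with two channels of DDR4 matches the tgl_buddy_page_masks[]
 * entry { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
 * so tgl_bw_buddy_init() below would program BW_BUDDY_PAGE_MASK with
 * 0x1F for each ABOX instance (unless the Wa_1409767108 table applies).
 */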
5325 5326 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) 5327 { 5328 enum intel_dram_type type = dev_priv->dram_info.type; 5329 u8 num_channels = dev_priv->dram_info.num_channels; 5330 const struct buddy_page_mask *table; 5331 unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask; 5332 int config, i; 5333 5334 if (IS_ALDERLAKE_S(dev_priv) || 5335 IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) || 5336 IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 5337 /* Wa_1409767108:tgl,dg1,adl-s */ 5338 table = wa_1409767108_buddy_page_masks; 5339 else 5340 table = tgl_buddy_page_masks; 5341 5342 for (config = 0; table[config].page_mask != 0; config++) 5343 if (table[config].num_channels == num_channels && 5344 table[config].type == type) 5345 break; 5346 5347 if (table[config].page_mask == 0) { 5348 drm_dbg(&dev_priv->drm, 5349 "Unknown memory configuration; disabling address buddy logic.\n"); 5350 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) 5351 intel_de_write(dev_priv, BW_BUDDY_CTL(i), 5352 BW_BUDDY_DISABLE); 5353 } else { 5354 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) { 5355 intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i), 5356 table[config].page_mask); 5357 5358 /* Wa_22010178259:tgl,rkl */ 5359 intel_de_rmw(dev_priv, BW_BUDDY_CTL(i), 5360 BW_BUDDY_TLB_REQ_TIMER_MASK, 5361 BW_BUDDY_TLB_REQ_TIMER(0x8)); 5362 } 5363 } 5364 } 5365 5366 static void icl_display_core_init(struct drm_i915_private *dev_priv, 5367 bool resume) 5368 { 5369 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5370 struct i915_power_well *well; 5371 u32 val; 5372 5373 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5374 5375 /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */ 5376 if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP && 5377 INTEL_PCH_TYPE(dev_priv) < PCH_DG1) 5378 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0, 5379 PCH_DPMGUNIT_CLOCK_GATE_DISABLE); 5380 5381 /* 1. Enable PCH reset handshake. */ 5382 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 5383 5384 /* 2. Initialize all combo phys */ 5385 intel_combo_phy_init(dev_priv); 5386 5387 /* 5388 * 3. Enable Power Well 1 (PG1). 5389 * The AUX IO power wells will be enabled on demand. 5390 */ 5391 mutex_lock(&power_domains->lock); 5392 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5393 intel_power_well_enable(dev_priv, well); 5394 mutex_unlock(&power_domains->lock); 5395 5396 /* 4. Enable CDCLK. */ 5397 intel_cdclk_init_hw(dev_priv); 5398 5399 if (DISPLAY_VER(dev_priv) >= 12) 5400 gen12_dbuf_slices_config(dev_priv); 5401 5402 /* 5. Enable DBUF. */ 5403 gen9_dbuf_enable(dev_priv); 5404 5405 /* 6. Setup MBUS. */ 5406 icl_mbus_init(dev_priv); 5407 5408 /* 7. Program arbiter BW_BUDDY registers */ 5409 if (DISPLAY_VER(dev_priv) >= 12) 5410 tgl_bw_buddy_init(dev_priv); 5411 5412 if (resume && dev_priv->csr.dmc_payload) 5413 intel_csr_load_program(dev_priv); 5414 5415 /* Wa_14011508470 */ 5416 if (IS_DISPLAY_VER(dev_priv, 12)) { 5417 val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | 5418 DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR; 5419 intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val); 5420 } 5421 } 5422 5423 static void icl_display_core_uninit(struct drm_i915_private *dev_priv) 5424 { 5425 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5426 struct i915_power_well *well; 5427 5428 gen9_disable_dc_states(dev_priv); 5429 5430 /* 1. Disable all display engine functions -> already done */ 5431 5432 /* 2. Disable DBUF */ 5433 gen9_dbuf_disable(dev_priv); 5434 5435 /* 3. Disable CD clock */ 5436 intel_cdclk_uninit_hw(dev_priv); 5437 5438 /* 5439 * 4. Disable Power Well 1 (PG1). 5440 * The AUX IO power wells are toggled on demand, so they are already 5441 * disabled at this point. 5442 */ 5443 mutex_lock(&power_domains->lock); 5444 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5445 intel_power_well_disable(dev_priv, well); 5446 mutex_unlock(&power_domains->lock); 5447 5448 /* 5. */ 5449 intel_combo_phy_uninit(dev_priv); 5450 } 5451 5452 static void chv_phy_control_init(struct drm_i915_private *dev_priv) 5453 { 5454 struct i915_power_well *cmn_bc = 5455 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 5456 struct i915_power_well *cmn_d = 5457 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 5458 5459 /* 5460 * DISPLAY_PHY_CONTROL can get corrupted if read. As a 5461 * workaround never ever read DISPLAY_PHY_CONTROL, and 5462 * instead maintain a shadow copy ourselves. Use the actual 5463 * power well state and lane status to reconstruct the 5464 * expected initial value.
5465 */ 5466 dev_priv->chv_phy_control = 5467 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | 5468 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | 5469 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | 5470 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | 5471 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); 5472 5473 /* 5474 * If all lanes are disabled we leave the override disabled 5475 * with all power down bits cleared to match the state we 5476 * would use after disabling the port. Otherwise enable the 5477 * override and set the lane powerdown bits according to the 5478 * current lane status. 5479 */ 5480 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { 5481 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A)); 5482 unsigned int mask; 5483 5484 mask = status & DPLL_PORTB_READY_MASK; 5485 if (mask == 0xf) 5486 mask = 0x0; 5487 else 5488 dev_priv->chv_phy_control |= 5489 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); 5490 5491 dev_priv->chv_phy_control |= 5492 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); 5493 5494 mask = (status & DPLL_PORTC_READY_MASK) >> 4; 5495 if (mask == 0xf) 5496 mask = 0x0; 5497 else 5498 dev_priv->chv_phy_control |= 5499 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); 5500 5501 dev_priv->chv_phy_control |= 5502 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); 5503 5504 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); 5505 5506 dev_priv->chv_phy_assert[DPIO_PHY0] = false; 5507 } else { 5508 dev_priv->chv_phy_assert[DPIO_PHY0] = true; 5509 } 5510 5511 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { 5512 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS); 5513 unsigned int mask; 5514 5515 mask = status & DPLL_PORTD_READY_MASK; 5516 5517 if (mask == 0xf) 5518 mask = 0x0; 5519 else 5520 dev_priv->chv_phy_control |= 5521 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); 5522 5523 dev_priv->chv_phy_control |= 5524 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); 5525 5526 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); 5527 5528 dev_priv->chv_phy_assert[DPIO_PHY1] = false; 5529 } else { 5530 dev_priv->chv_phy_assert[DPIO_PHY1] = true; 5531 } 5532 5533 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n", 5534 dev_priv->chv_phy_control); 5535 5536 /* Defer application of initial phy_control to enabling the powerwell */ 5537 } 5538 5539 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) 5540 { 5541 struct i915_power_well *cmn = 5542 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 5543 struct i915_power_well *disp2d = 5544 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); 5545 5546 /* If the display might already be active, skip this */ 5547 if (cmn->desc->ops->is_enabled(dev_priv, cmn) && 5548 disp2d->desc->ops->is_enabled(dev_priv, disp2d) && 5549 intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST) 5550 return; 5551 5552 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n"); 5553 5554 /* cmnlane needs DPLL registers */ 5555 disp2d->desc->ops->enable(dev_priv, disp2d); 5556 5557 /* 5558 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: 5559 * Need to assert and de-assert PHY SB reset by gating the 5560 * common lane power, then un-gating it. 5561 * Simply ungating isn't enough to reset the PHY enough to get 5562 * ports and lanes running.
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}

static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_CANNONLAKE(i915)) {
		cnl_display_core_init(i915, resume);
	} else if (IS_GEN9_BC(i915)) {
		skl_display_core_init(i915, resume);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/*
	 * Disable power well support (i.e. keep all power wells enabled)
	 * if the user asked for it.
	 */
	if (!i915->params.disable_power_well) {
		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
		i915->power_domains.disable_wakeref = intel_display_power_get(i915,
									      POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}
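/*
 * Taken together with the kernel-doc pairing rules, the expected bracketing
 * over a driver's lifetime looks roughly like this (a sketch of the call
 * order only, not literal probe/remove code):
 *
 *	intel_power_domains_init_hw(i915, false);	// all wells on, init ref held
 *	// ... display HW state readout takes per-domain references ...
 *	intel_power_domains_enable(i915);		// drop init ref, allow toggling
 *
 *	// ... normal operation ...
 *
 *	intel_power_domains_disable(i915);		// re-take init ref
 *	intel_power_domains_driver_remove(i915);	// release refs, device stays powered
 */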
/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the on-demand enabling/disabling of the display power wells. Note
 * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
 * toggled only at specific points of the display modeset sequence, thus they
 * are not affected by the intel_power_domains_enable()/disable() calls. The
 * purpose of this function is to keep the rest of the power wells enabled
 * until the end of display HW readout (which will acquire the power
 * references reflecting the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the on-demand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}
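/*
 * Both functions above hand a single wakeref through
 * power_domains->init_wakeref, and fetch_and_zero() is what makes the
 * hand-off safe: it takes the reference out of the struct so it is released
 * exactly once. The ownership transfer in isolation (illustrative sketch,
 * not driver code):
 *
 *	wakeref = fetch_and_zero(&state->wakeref);	// steal; field is 0 now
 *	release(wakeref);				// sole owner drops it
 *
 * and the reverse direction guards against leaking an earlier reference:
 *
 *	WARN_ON(state->wakeref);
 *	state->wakeref = acquire();
 */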
/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * CSR/DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    i915->csr.dmc_payload) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	if (DISPLAY_VER(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_CANNONLAKE(i915))
		cnl_display_core_uninit(i915);
	else if (IS_GEN9_BC(i915))
		skl_display_core_uninit(i915);
	else if (IS_GEN9_LP(i915))
		bxt_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}
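/*
 * The early return in intel_power_domains_suspend() encodes one decision:
 * whether the DMC firmware can be left in charge across suspend. Reduced to
 * a predicate (an illustrative sketch, not a driver helper):
 *
 *	static bool dmc_manages_suspend(const struct dmc_caps *caps,
 *					enum suspend_mode mode)
 *	{
 *		return mode == SUSPEND_IDLE &&	// s2idle/S0ix only
 *		       caps->payload_loaded &&	// firmware present to do the work
 *		       !caps->has_dc9;		// with DC9 we deinit manually instead
 *	}
 *
 * When the predicate holds, the power domains stay initialized and the
 * firmware powers hardware down on demand; otherwise the per-platform
 * *_display_core_uninit() path runs.
 */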
/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
		power_domains->init_wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg(&i915->drm, "%-25s %d\n",
			power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			drm_dbg(&i915->drm, " %-23s %d\n",
				intel_display_power_domain_str(domain),
				power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			drm_err(&i915->drm,
				"power well %s state mismatch (refcount %d/enabled %d)",
				power_well->desc->name,
				power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			drm_err(&i915->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				power_well->desc->name, power_well->count,
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif
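/*
 * intel_power_domains_verify_state() checks two invariants per power well:
 * (a) the well is enabled in hardware iff its refcount is non-zero or it is
 * always-on, and (b) its refcount equals the sum of the use counts of the
 * domains it serves. Invariant (b) in isolation:
 *
 *	int expected = 0;
 *
 *	for_each_power_domain(domain, power_well->desc->domains)
 *		expected += power_domains->domain_use_count[domain];
 *
 *	// a mismatch means a domain get/put imbalance somewhere
 *	WARN_ON(power_well->count != expected);
 */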
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEN9_LP(i915)) {
		bxt_enable_dc9(i915);
		/* Tweaked Wa_14010685332:icp,jsp,mcc */
		if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
			intel_de_rmw(i915, SOUTH_CHICKEN1,
				     SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEN9_LP(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
		/* Tweaked Wa_14010685332:icp,jsp,mcc */
		if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
			intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);

	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}

void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

void intel_display_power_resume(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		bxt_disable_dc9(i915);
		icl_display_core_init(i915, true);
		if (i915->csr.dmc_payload) {
			if (i915->csr.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(i915);
			else if (i915->csr.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(i915);
		}
	} else if (IS_GEN9_LP(i915)) {
		bxt_disable_dc9(i915);
		bxt_display_core_init(i915, true);
		if (i915->csr.dmc_payload &&
		    (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}
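/*
 * A note on the DC state selection in intel_display_power_resume(): the
 * driver re-enters the deepest DC state the platform allows, preferring DC6
 * over DC5, and enters neither without DMC firmware loaded, since the
 * firmware performs the actual DC transitions. As a condensed sketch of the
 * logic above:
 *
 *	if (!i915->csr.dmc_payload)
 *		return;
 *	if (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
 *		skl_enable_dc6(i915);
 *	else if (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
 *		gen9_enable_dc5(i915);
 */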