/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "display/intel_crt.h"
#include "display/intel_dp.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_csr.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
                                         enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
        switch (domain) {
        case POWER_DOMAIN_DISPLAY_CORE:
                return "DISPLAY_CORE";
        case POWER_DOMAIN_PIPE_A:
                return "PIPE_A";
        case POWER_DOMAIN_PIPE_B:
                return "PIPE_B";
        case POWER_DOMAIN_PIPE_C:
                return "PIPE_C";
        case POWER_DOMAIN_PIPE_D:
                return "PIPE_D";
        case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
                return "PIPE_A_PANEL_FITTER";
        case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
                return "PIPE_B_PANEL_FITTER";
        case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
                return "PIPE_C_PANEL_FITTER";
        case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
                return "PIPE_D_PANEL_FITTER";
        case POWER_DOMAIN_TRANSCODER_A:
                return "TRANSCODER_A";
        case POWER_DOMAIN_TRANSCODER_B:
                return "TRANSCODER_B";
        case POWER_DOMAIN_TRANSCODER_C:
                return "TRANSCODER_C";
        case POWER_DOMAIN_TRANSCODER_D:
                return "TRANSCODER_D";
        case POWER_DOMAIN_TRANSCODER_EDP:
                return "TRANSCODER_EDP";
        case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
                return "TRANSCODER_VDSC_PW2";
        case POWER_DOMAIN_TRANSCODER_DSI_A:
                return "TRANSCODER_DSI_A";
        case POWER_DOMAIN_TRANSCODER_DSI_C:
                return "TRANSCODER_DSI_C";
        case POWER_DOMAIN_PORT_DDI_A_LANES:
                return "PORT_DDI_A_LANES";
        case POWER_DOMAIN_PORT_DDI_B_LANES:
                return "PORT_DDI_B_LANES";
        case POWER_DOMAIN_PORT_DDI_C_LANES:
                return "PORT_DDI_C_LANES";
        case POWER_DOMAIN_PORT_DDI_D_LANES:
                return "PORT_DDI_D_LANES";
        case POWER_DOMAIN_PORT_DDI_E_LANES:
                return "PORT_DDI_E_LANES";
        case POWER_DOMAIN_PORT_DDI_F_LANES:
                return "PORT_DDI_F_LANES";
        case POWER_DOMAIN_PORT_DDI_G_LANES:
                return "PORT_DDI_G_LANES";
        case POWER_DOMAIN_PORT_DDI_H_LANES:
                return "PORT_DDI_H_LANES";
        case POWER_DOMAIN_PORT_DDI_I_LANES:
                return "PORT_DDI_I_LANES";
        case POWER_DOMAIN_PORT_DDI_A_IO:
                return "PORT_DDI_A_IO";
        case POWER_DOMAIN_PORT_DDI_B_IO:
                return "PORT_DDI_B_IO";
        case POWER_DOMAIN_PORT_DDI_C_IO:
                return "PORT_DDI_C_IO";
        case POWER_DOMAIN_PORT_DDI_D_IO:
                return "PORT_DDI_D_IO";
        case POWER_DOMAIN_PORT_DDI_E_IO:
                return "PORT_DDI_E_IO";
        case POWER_DOMAIN_PORT_DDI_F_IO:
                return "PORT_DDI_F_IO";
        case POWER_DOMAIN_PORT_DDI_G_IO:
                return "PORT_DDI_G_IO";
        case POWER_DOMAIN_PORT_DDI_H_IO:
                return "PORT_DDI_H_IO";
        case POWER_DOMAIN_PORT_DDI_I_IO:
                return "PORT_DDI_I_IO";
        case POWER_DOMAIN_PORT_DSI:
                return "PORT_DSI";
        case POWER_DOMAIN_PORT_CRT:
                return "PORT_CRT";
        case POWER_DOMAIN_PORT_OTHER:
                return "PORT_OTHER";
        case POWER_DOMAIN_VGA:
                return "VGA";
        case POWER_DOMAIN_AUDIO:
                return "AUDIO";
        case POWER_DOMAIN_AUX_A:
                return "AUX_A";
        case POWER_DOMAIN_AUX_B:
                return "AUX_B";
        case POWER_DOMAIN_AUX_C:
                return "AUX_C";
        case POWER_DOMAIN_AUX_D:
                return "AUX_D";
        case POWER_DOMAIN_AUX_E:
                return "AUX_E";
        case POWER_DOMAIN_AUX_F:
                return "AUX_F";
        case POWER_DOMAIN_AUX_G:
                return "AUX_G";
        case POWER_DOMAIN_AUX_H:
                return "AUX_H";
        case POWER_DOMAIN_AUX_I:
                return "AUX_I";
        case POWER_DOMAIN_AUX_IO_A:
                return "AUX_IO_A";
        case POWER_DOMAIN_AUX_C_TBT:
                return "AUX_C_TBT";
        case POWER_DOMAIN_AUX_D_TBT:
                return "AUX_D_TBT";
        case POWER_DOMAIN_AUX_E_TBT:
                return "AUX_E_TBT";
        case POWER_DOMAIN_AUX_F_TBT:
                return "AUX_F_TBT";
        case POWER_DOMAIN_AUX_G_TBT:
                return "AUX_G_TBT";
        case POWER_DOMAIN_AUX_H_TBT:
                return "AUX_H_TBT";
        case POWER_DOMAIN_AUX_I_TBT:
                return "AUX_I_TBT";
        case POWER_DOMAIN_GMBUS:
                return "GMBUS";
        case POWER_DOMAIN_INIT:
                return "INIT";
        case POWER_DOMAIN_MODESET:
                return "MODESET";
        case POWER_DOMAIN_GT_IRQ:
                return "GT_IRQ";
        case POWER_DOMAIN_DPLL_DC_OFF:
                return "DPLL_DC_OFF";
        case POWER_DOMAIN_TC_COLD_OFF:
                return "TC_COLD_OFF";
        default:
                MISSING_CASE(domain);
                return "?";
        }
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well)
{
        drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
        power_well->desc->ops->enable(dev_priv, power_well);
        power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
                                     struct i915_power_well *power_well)
{
        drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
        power_well->hw_enabled = false;
        power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
                                 struct i915_power_well *power_well)
{
        if (!power_well->count++)
                intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
                                 struct i915_power_well *power_well)
{
        drm_WARN(&dev_priv->drm, !power_well->count,
                 "Use count on power well %s is already zero",
                 power_well->desc->name);

        if (!--power_well->count)
                intel_power_well_disable(dev_priv, power_well);
}
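/*
 * Illustrative (not from this file): the get/put pair above is strictly
 * refcounted, so a hypothetical caller that already holds
 * power_domains->lock would bracket hardware access like this:
 *
 *      intel_power_well_get(dev_priv, power_well);
 *      ... poke registers behind the well ...
 *      intel_power_well_put(dev_priv, power_well);
 *
 * Only the 0 -> 1 and 1 -> 0 count transitions touch the hardware.
 */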
/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                                      enum intel_display_power_domain domain)
{
        struct i915_power_well *power_well;
        bool is_enabled;

        if (dev_priv->runtime_pm.suspended)
                return false;

        is_enabled = true;

        for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
                if (power_well->desc->always_on)
                        continue;

                if (!power_well->hw_enabled) {
                        is_enabled = false;
                        break;
                }
        }

        return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                                    enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains;
        bool ret;

        power_domains = &dev_priv->power_domains;

        mutex_lock(&power_domains->lock);
        ret = __intel_display_power_is_enabled(dev_priv, domain);
        mutex_unlock(&power_domains->lock);

        return ret;
}
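/*
 * Illustrative readout-time pattern (hypothetical caller, assuming the
 * relevant modeset locks are already held): bail out before touching
 * registers if the domain is powered down.
 *
 *      if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *              return false;
 *      ... read out the pipe hardware state ...
 */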
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
                                       u8 irq_pipe_mask, bool has_vga)
{
        if (has_vga)
                intel_vga_reset_io_mem(dev_priv);

        if (irq_pipe_mask)
                gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
                                       u8 irq_pipe_mask)
{
        if (irq_pipe_mask)
                gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

#define ICL_AUX_PW_TO_CH(pw_idx) \
        ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
        ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
                                     struct i915_power_well *power_well)
{
        int pw_idx = power_well->desc->hsw.idx;

        return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
                                                 ICL_AUX_PW_TO_CH(pw_idx);
}
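/*
 * Worked example (illustrative): both macros are plain offset arithmetic.
 * A non-TBT well with pw_idx == ICL_PW_CTL_IDX_AUX_B yields
 * AUX_CH_A + 1 == AUX_CH_B, while a TBT well with
 * pw_idx == ICL_PW_CTL_IDX_AUX_TBT1 yields AUX_CH_C, the first TypeC AUX
 * channel.
 */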
static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
                       enum aux_ch aux_ch)
{
        struct intel_digital_port *dig_port = NULL;
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                /* We'll check the MST primary port */
                if (encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                dig_port = enc_to_dig_port(encoder);
                if (!dig_port)
                        continue;

                if (dig_port->aux_ch != aux_ch) {
                        dig_port = NULL;
                        continue;
                }

                break;
        }

        return dig_port;
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well,
                                           bool timeout_expected)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;

        /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
        if (intel_de_wait_for_set(dev_priv, regs->driver,
                                  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
                drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
                            power_well->desc->name);

                drm_WARN_ON(&dev_priv->drm, !timeout_expected);
        }
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
                                     const struct i915_power_well_regs *regs,
                                     int pw_idx)
{
        u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
        u32 ret;

        ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
        ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
        if (regs->kvmr.reg)
                ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
        ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;

        return ret;
}

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        bool disabled;
        u32 reqs;

        /*
         * Bspec doesn't require waiting for PWs to get disabled, but still do
         * this for paranoia. The known cases where a PW will be forced on:
         * - a KVMR request on any power well via the KVMR request register
         * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
         *   DEBUG request registers
         * Skip the wait in case any of the request bits are set and print a
         * diagnostic message.
         */
        wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
                               HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
                 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
        if (disabled)
                return;

        drm_dbg_kms(&dev_priv->drm,
                    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
                    power_well->desc->name,
                    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
                                           enum skl_power_gate pg)
{
        /* Timeout 5us for PG#0, for other PGs 1us */
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
                                          SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        u32 val;

        if (power_well->desc->hsw.has_fuses) {
                enum skl_power_gate pg;

                pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
                                                 SKL_PW_CTL_IDX_TO_PG(pw_idx);
                /*
                 * For PW1 we have to wait both for the PW0/PG0 fuse state
                 * before enabling the power well and PW1/PG1's own fuse
                 * state after the enabling. For all other power wells with
                 * fuses we only have to wait for that PW/PG's fuse state
                 * after the enabling.
                 */
                if (pg == SKL_PG1)
                        gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
        }

        val = intel_de_read(dev_priv, regs->driver);
        intel_de_write(dev_priv, regs->driver,
                       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

        hsw_wait_for_power_well_enable(dev_priv, power_well, false);

        /* Display WA #1178: cnl */
        if (IS_CANNONLAKE(dev_priv) &&
            pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
            pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
                u32 val;

                val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
                val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
                intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
        }

        if (power_well->desc->hsw.has_fuses) {
                enum skl_power_gate pg;

                pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
                                                 SKL_PW_CTL_IDX_TO_PG(pw_idx);
                gen9_wait_for_power_well_fuses(dev_priv, pg);
        }

        hsw_power_well_post_enable(dev_priv,
                                   power_well->desc->hsw.irq_pipe_mask,
                                   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        u32 val;

        hsw_power_well_pre_disable(dev_priv,
                                   power_well->desc->hsw.irq_pipe_mask);

        val = intel_de_read(dev_priv, regs->driver);
        intel_de_write(dev_priv, regs->driver,
                       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
        hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#define ICL_AUX_PW_TO_PHY(pw_idx)       ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
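/*
 * Illustrative: combo-PHY AUX well indices start at ICL_PW_CTL_IDX_AUX_A,
 * so e.g. the AUX B well (pw_idx == ICL_PW_CTL_IDX_AUX_B) maps to PHY_B.
 */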
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
        u32 val;

        drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

        val = intel_de_read(dev_priv, regs->driver);
        intel_de_write(dev_priv, regs->driver,
                       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

        if (INTEL_GEN(dev_priv) < 12) {
                val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
                intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
                               val | ICL_LANE_ENABLE_AUX);
        }

        hsw_wait_for_power_well_enable(dev_priv, power_well, false);

        /* Display WA #1178: icl */
        if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
            !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
                val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
                val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
                intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
        }
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
                                     struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
        u32 val;

        drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

        val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
        intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
                       val & ~ICL_LANE_ENABLE_AUX);

        val = intel_de_read(dev_priv, regs->driver);
        intel_de_write(dev_priv, regs->driver,
                       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

        hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
                                      struct i915_power_well *power_well)
{
        int refs = hweight64(power_well->desc->domains &
                             async_put_domains_mask(&dev_priv->power_domains));

        drm_WARN_ON(&dev_priv->drm, refs > power_well->count);

        return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well,
                                        struct intel_digital_port *dig_port)
{
        /* Bypass the check if all references are released asynchronously */
        if (power_well_async_ref_count(dev_priv, power_well) ==
            power_well->count)
                return;

        if (drm_WARN_ON(&dev_priv->drm, !dig_port))
                return;

        if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
                return;

        drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well,
                                        struct intel_digital_port *dig_port)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)   ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void icl_tc_cold_exit(struct drm_i915_private *i915)
{
        int ret, tries = 0;

        while (1) {
                ret = sandybridge_pcode_write_timeout(i915,
                                                      ICL_PCODE_EXIT_TCCOLD,
                                                      0, 250, 1);
                if (ret != -EAGAIN || ++tries == 3)
                        break;
                msleep(1);
        }

        /* Spec states that TC cold exit can take up to 1ms to complete */
        if (!ret)
                msleep(1);

        /* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
        drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
                    "succeeded");
}
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
                                 struct i915_power_well *power_well)
{
        enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
        struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        bool is_tbt = power_well->desc->hsw.is_tc_tbt;
        bool timeout_expected;
        u32 val;

        icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

        val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
        val &= ~DP_AUX_CH_CTL_TBT_IO;
        if (is_tbt)
                val |= DP_AUX_CH_CTL_TBT_IO;
        intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

        val = intel_de_read(dev_priv, regs->driver);
        intel_de_write(dev_priv, regs->driver,
                       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));

        /*
         * An AUX timeout is expected if the TBT DP tunnel is down, or if we
         * need to enable AUX on a legacy TypeC port as part of the TC-cold
         * exit sequence.
         */
        timeout_expected = is_tbt;
        if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) {
                icl_tc_cold_exit(dev_priv);
                timeout_expected = true;
        }

        hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);

        if (INTEL_GEN(dev_priv) >= 12 && !is_tbt) {
                enum tc_port tc_port;

                tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
                intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
                               HIP_INDEX_VAL(tc_port, 0x2));

                if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
                                          DKL_CMN_UC_DW27_UC_HEALTH, 1))
                        drm_warn(&dev_priv->drm,
                                 "Timeout waiting for TC uC health\n");
        }
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
        enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
        struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);

        icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

        hsw_power_well_disable(dev_priv, power_well);
}

static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
                          struct i915_power_well *power_well)
{
        int pw_idx = power_well->desc->hsw.idx;
        enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
        bool is_tbt = power_well->desc->hsw.is_tc_tbt;

        if (is_tbt || intel_phy_is_tc(dev_priv, phy))
                return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
        else if (IS_ICELAKE(dev_priv))
                return icl_combo_phy_aux_power_well_enable(dev_priv,
                                                           power_well);
        else
                return hsw_power_well_enable(dev_priv, power_well);
}

static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
                           struct i915_power_well *power_well)
{
        int pw_idx = power_well->desc->hsw.idx;
        enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
        bool is_tbt = power_well->desc->hsw.is_tc_tbt;

        if (is_tbt || intel_phy_is_tc(dev_priv, phy))
                return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
        else if (IS_ICELAKE(dev_priv))
                return icl_combo_phy_aux_power_well_disable(dev_priv,
                                                            power_well);
        else
                return hsw_power_well_disable(dev_priv, power_well);
}
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        enum i915_power_well_id id = power_well->desc->id;
        int pw_idx = power_well->desc->hsw.idx;
        u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
                   HSW_PWR_WELL_CTL_STATE(pw_idx);
        u32 val;

        val = intel_de_read(dev_priv, regs->driver);

        /*
         * On GEN9 big core due to a DMC bug the driver's request bits for PW1
         * and the MISC_IO PW will not be restored, so check instead for the
         * BIOS's own request bits, which are forced-on for these power wells
         * when exiting DC5/6.
         */
        if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
            (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
                val |= intel_de_read(dev_priv, regs->bios);

        return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
        drm_WARN_ONCE(&dev_priv->drm,
                      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
                      "DC9 already programmed to be enabled.\n");
        drm_WARN_ONCE(&dev_priv->drm,
                      intel_de_read(dev_priv, DC_STATE_EN) &
                      DC_STATE_EN_UPTO_DC5,
                      "DC5 still not disabled to enable DC9.\n");
        drm_WARN_ONCE(&dev_priv->drm,
                      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
                      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
                      "Power well 2 on.\n");
        drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
                      "Interrupts not disabled yet.\n");

        /*
         * TODO: check for the following to verify the conditions to enter DC9
         * state are satisfied:
         * 1] Check relevant display engine registers to verify if mode set
         * disable sequence was followed.
         * 2] Check if display uninitialize sequence is initialized.
         */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
        drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
                      "Interrupts not disabled yet.\n");
        drm_WARN_ONCE(&dev_priv->drm,
                      intel_de_read(dev_priv, DC_STATE_EN) &
                      DC_STATE_EN_UPTO_DC5,
                      "DC5 still not disabled.\n");

        /*
         * TODO: check for the following to verify DC9 state was indeed
         * entered before programming to disable it:
         * 1] Check relevant display engine registers to verify if mode
         * set disable sequence was followed.
         * 2] Check if display uninitialize sequence is initialized.
         */
}
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
                                u32 state)
{
        int rewrites = 0;
        int rereads = 0;
        u32 v;

        intel_de_write(dev_priv, DC_STATE_EN, state);

        /*
         * It has been observed that disabling the DC6 state sometimes
         * doesn't stick and the DMC keeps returning the old value. Make sure
         * the write really sticks enough times and also force rewrites until
         * we are confident that the state is exactly what we want.
         */
        do {
                v = intel_de_read(dev_priv, DC_STATE_EN);

                if (v != state) {
                        intel_de_write(dev_priv, DC_STATE_EN, state);
                        rewrites++;
                        rereads = 0;
                } else if (rereads++ > 5) {
                        break;
                }

        } while (rewrites < 100);

        if (v != state)
                drm_err(&dev_priv->drm,
                        "Writing dc state to 0x%x failed, now 0x%x\n",
                        state, v);

        /* Most of the times we need one retry, avoid spam */
        if (rewrites > 1)
                drm_dbg_kms(&dev_priv->drm,
                            "Rewrote dc state to 0x%x %d times\n",
                            state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
        u32 mask;

        mask = DC_STATE_EN_UPTO_DC5;

        if (INTEL_GEN(dev_priv) >= 12)
                mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
                        | DC_STATE_EN_DC9;
        else if (IS_GEN(dev_priv, 11))
                mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
        else if (IS_GEN9_LP(dev_priv))
                mask |= DC_STATE_EN_DC9;
        else
                mask |= DC_STATE_EN_UPTO_DC6;

        return mask;
}
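/*
 * Illustrative decode of the ladder above: on gen12 the mask covers all of
 * DC3CO, DC5, DC6 and DC9; on gen11 it is DC5 | DC6 | DC9; on gen9 LP parts
 * DC5 | DC9; and on gen9 big core DC5 | DC6 - matching the DC states each
 * platform implements.
 */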
static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);

        drm_dbg_kms(&dev_priv->drm,
                    "Resetting DC state tracking from %02x to %02x\n",
                    dev_priv->csr.dc_state, val);
        dev_priv->csr.dc_state = val;
}

/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * the request to enable it. Disabling a deeper power state is synchronous:
 * for instance setting %DC_STATE_DISABLE won't complete until all HW
 * resources are turned back on and register state is restored. This is
 * guaranteed by the MMIO write to DC_STATE_EN blocking until the state is
 * restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
        u32 val;
        u32 mask;

        if (drm_WARN_ON_ONCE(&dev_priv->drm,
                             state & ~dev_priv->csr.allowed_dc_mask))
                state &= dev_priv->csr.allowed_dc_mask;

        val = intel_de_read(dev_priv, DC_STATE_EN);
        mask = gen9_dc_mask(dev_priv);
        drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
                    val & mask, state);

        /* Check if DMC is ignoring our DC state requests */
        if ((val & mask) != dev_priv->csr.dc_state)
                drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
                        dev_priv->csr.dc_state, val & mask);

        val &= ~mask;
        val |= state;

        gen9_write_dc_state(dev_priv, val);

        dev_priv->csr.dc_state = val & mask;
}

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
                         u32 target_dc_state)
{
        u32 states[] = {
                DC_STATE_EN_UPTO_DC6,
                DC_STATE_EN_UPTO_DC5,
                DC_STATE_EN_DC3CO,
                DC_STATE_DISABLE,
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
                if (target_dc_state != states[i])
                        continue;

                if (dev_priv->csr.allowed_dc_mask & target_dc_state)
                        break;

                target_dc_state = states[i + 1];
        }

        return target_dc_state;
}
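/*
 * Illustrative fallback chain: a DC_STATE_EN_UPTO_DC6 request on a platform
 * whose allowed_dc_mask lacks DC6 degrades step by step through
 * DC_STATE_EN_UPTO_DC5 and DC_STATE_EN_DC3CO, ending at DC_STATE_DISABLE if
 * nothing earlier in the states[] chain is allowed either.
 */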
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
        drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
        gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
        u32 val;

        drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
        val = intel_de_read(dev_priv, DC_STATE_EN);
        val &= ~DC_STATE_DC3CO_STATUS;
        intel_de_write(dev_priv, DC_STATE_EN, val);
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
        /*
         * Delay of 200us for DC3CO exit time, Bspec 49196
         */
        usleep_range(200, 210);
}

static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
        assert_can_enable_dc9(dev_priv);

        drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
        /*
         * Power sequencer reset is not needed on
         * platforms with South Display Engine on PCH,
         * because PPS registers are always on.
         */
        if (!HAS_PCH_SPLIT(dev_priv))
                intel_power_sequencer_reset(dev_priv);
        gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
        assert_can_disable_dc9(dev_priv);

        drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

        intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
        drm_WARN_ONCE(&dev_priv->drm,
                      !intel_de_read(dev_priv, CSR_PROGRAM(0)),
                      "CSR program storage start is NULL\n");
        drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
                      "CSR SSP Base Not fine\n");
        drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
                      "CSR HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
                  enum i915_power_well_id power_well_id)
{
        struct i915_power_well *power_well;

        for_each_power_well(dev_priv, power_well)
                if (power_well->desc->id == power_well_id)
                        return power_well;

        /*
         * It's not feasible to add error checking code to the callers since
         * this condition really shouldn't happen and it doesn't even make
         * sense to abort things like display initialization sequences. Just
         * return the first power well and hope the WARN gets reported so we
         * can fix our driver.
         */
        drm_WARN(&dev_priv->drm, 1,
                 "Power well %d not defined for this platform\n",
                 power_well_id);
        return &dev_priv->power_domains.power_wells[0];
}
/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well's target_dc_state; based on
 * this target_dc_state, the "DC off" power well will enable the desired
 * DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
                                             u32 state)
{
        struct i915_power_well *power_well;
        bool dc_off_enabled;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;

        mutex_lock(&power_domains->lock);
        power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

        if (drm_WARN_ON(&dev_priv->drm, !power_well))
                goto unlock;

        state = sanitize_target_dc_state(dev_priv, state);

        if (state == dev_priv->csr.target_dc_state)
                goto unlock;

        dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
                                                           power_well);
        /*
         * If the DC off power well is disabled, we need to enable and
         * disable the DC off power well to effect the target DC state.
         */
        if (!dc_off_enabled)
                power_well->desc->ops->enable(dev_priv, power_well);

        dev_priv->csr.target_dc_state = state;

        if (!dc_off_enabled)
                power_well->desc->ops->disable(dev_priv, power_well);

unlock:
        mutex_unlock(&power_domains->lock);
}
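/*
 * Illustrative (hypothetical) caller, e.g. a DC3CO-capable PSR2 flow
 * switching the target state before enabling the feature and restoring a
 * deeper state afterwards:
 *
 *      intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 *      ...
 *      intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 */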
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
        enum i915_power_well_id high_pg;

        /* Power wells at this level and above must be disabled for DC5 entry */
        if (INTEL_GEN(dev_priv) >= 12)
                high_pg = ICL_DISP_PW_3;
        else
                high_pg = SKL_DISP_PW_2;

        drm_WARN_ONCE(&dev_priv->drm,
                      intel_display_power_well_is_enabled(dev_priv, high_pg),
                      "Power wells above platform's DC5 limit still enabled.\n");

        drm_WARN_ONCE(&dev_priv->drm,
                      (intel_de_read(dev_priv, DC_STATE_EN) &
                       DC_STATE_EN_UPTO_DC5),
                      "DC5 already programmed to be enabled.\n");
        assert_rpm_wakelock_held(&dev_priv->runtime_pm);

        assert_csr_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
        assert_can_enable_dc5(dev_priv);

        drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

        /* Wa Display #1183: skl,kbl,cfl */
        if (IS_GEN9_BC(dev_priv))
                intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
                               intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
        drm_WARN_ONCE(&dev_priv->drm,
                      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
                      "Backlight is not disabled.\n");
        drm_WARN_ONCE(&dev_priv->drm,
                      (intel_de_read(dev_priv, DC_STATE_EN) &
                       DC_STATE_EN_UPTO_DC6),
                      "DC6 already programmed to be enabled.\n");

        assert_csr_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
        assert_can_enable_dc6(dev_priv);

        drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

        /* Wa Display #1183: skl,kbl,cfl */
        if (IS_GEN9_BC(dev_priv))
                intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
                               intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
        u32 bios_req = intel_de_read(dev_priv, regs->bios);

        /* Take over the request bit if set by BIOS. */
        if (bios_req & mask) {
                u32 drv_req = intel_de_read(dev_priv, regs->driver);

                if (!(drv_req & mask))
                        intel_de_write(dev_priv, regs->driver, drv_req | mask);
                intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
        }
}
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
        struct i915_power_well *power_well;

        power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
        if (power_well->count > 0)
                bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

        power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
        if (power_well->count > 0)
                bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

        if (IS_GEMINILAKE(dev_priv)) {
                power_well = lookup_power_well(dev_priv,
                                               GLK_DISP_PW_DPIO_CMN_C);
                if (power_well->count > 0)
                        bxt_ddi_phy_verify_state(dev_priv,
                                                 power_well->desc->bxt.phy);
        }
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
                (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
        u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
        u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;

        drm_WARN(&dev_priv->drm,
                 hw_enabled_dbuf_slices != enabled_dbuf_slices,
                 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
                 hw_enabled_dbuf_slices,
                 enabled_dbuf_slices);
}

static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
        struct intel_cdclk_config cdclk_config = {};

        if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
                tgl_disable_dc3co(dev_priv);
                return;
        }

        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

        dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
        /* Can't read out voltage_level so can't use intel_cdclk_changed() */
        drm_WARN_ON(&dev_priv->drm,
                    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
                                              &cdclk_config));

        gen9_assert_dbuf_enabled(dev_priv);

        if (IS_GEN9_LP(dev_priv))
                bxt_verify_ddi_phy_power_wells(dev_priv);

        if (INTEL_GEN(dev_priv) >= 11)
                /*
                 * DMC retains HW context only for port A, the other combo
                 * PHY's HW context for port B is lost after DC transitions,
                 * so we need to restore it manually.
                 */
                intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        if (!dev_priv->csr.dmc_payload)
                return;

        switch (dev_priv->csr.target_dc_state) {
        case DC_STATE_EN_DC3CO:
                tgl_enable_dc3co(dev_priv);
                break;
        case DC_STATE_EN_UPTO_DC6:
                skl_enable_dc6(dev_priv);
                break;
        case DC_STATE_EN_UPTO_DC5:
                gen9_enable_dc5(dev_priv);
                break;
        }
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
                                         struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
                                              struct i915_power_well *power_well)
{
        return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
                                         struct i915_power_well *power_well)
{
        if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
                i830_enable_pipe(dev_priv, PIPE_A);
        if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
                i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        i830_disable_pipe(dev_priv, PIPE_B);
        i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
               intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        if (power_well->count > 0)
                i830_pipes_power_well_enable(dev_priv, power_well);
        else
                i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
                               struct i915_power_well *power_well, bool enable)
{
        int pw_idx = power_well->desc->vlv.idx;
        u32 mask;
        u32 state;
        u32 ctrl;

        mask = PUNIT_PWRGT_MASK(pw_idx);
        state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
                         PUNIT_PWRGT_PWR_GATE(pw_idx);

        vlv_punit_get(dev_priv);

#define COND \
        ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

        if (COND)
                goto out;

        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
        ctrl &= ~mask;
        ctrl |= state;
        vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

        if (wait_for(COND, 100))
                drm_err(&dev_priv->drm,
                        "timeout setting power well state %08x (%08x)\n",
                        state,
                        vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
        vlv_punit_put(dev_priv);
}
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        int pw_idx = power_well->desc->vlv.idx;
        bool enabled = false;
        u32 mask;
        u32 state;
        u32 ctrl;

        mask = PUNIT_PWRGT_MASK(pw_idx);
        ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

        vlv_punit_get(dev_priv);

        state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
        /*
         * We only ever set the power-on and power-gate states, anything
         * else is unexpected.
         */
        drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
                    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
        if (state == ctrl)
                enabled = true;

        /*
         * A transient state at this point would mean some unexpected party
         * is poking at the power controls too.
         */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
        drm_WARN_ON(&dev_priv->drm, ctrl != state);

        vlv_punit_put(dev_priv);

        return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
        u32 val;

        /*
         * On driver load, a pipe may be active and driving a DSI display.
         * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
         * (and never recovering) in this case. intel_dsi_post_disable() will
         * clear it when we turn off the display.
         */
        val = intel_de_read(dev_priv, DSPCLK_GATE_D);
        val &= DPOUNIT_CLOCK_GATE_DISABLE;
        val |= VRHUNIT_CLOCK_GATE_DISABLE;
        intel_de_write(dev_priv, DSPCLK_GATE_D, val);

        /*
         * Disable trickle feed and enable pnd deadline calculation
         */
        intel_de_write(dev_priv, MI_ARB_VLV,
                       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
        intel_de_write(dev_priv, CBR1_VLV, 0);

        drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
        intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
                       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
                                         1000));
}
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        enum pipe pipe;

        /*
         * Enable the CRI clock source so we can get at the
         * display and the reference clock for VGA
         * hotplug / manual detection. Supposedly DSI also
         * needs the ref clock up and running.
         *
         * CHV DPLL B/C have some issues if VGA mode is enabled.
         */
        for_each_pipe(dev_priv, pipe) {
                u32 val = intel_de_read(dev_priv, DPLL(pipe));

                val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
                if (pipe != PIPE_A)
                        val |= DPLL_INTEGRATED_CRI_CLK_VLV;

                intel_de_write(dev_priv, DPLL(pipe), val);
        }

        vlv_init_display_clock_gating(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_enable_display_irqs(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        /*
         * During driver initialization/resume we can avoid restoring the
         * part of the HW/SW state that will be inited anyway explicitly.
         */
        if (dev_priv->power_domains.initializing)
                return;

        intel_hpd_init(dev_priv);
        intel_hpd_poll_disable(dev_priv);

        /* Re-enable the ADPA, if we have one */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                if (encoder->type == INTEL_OUTPUT_ANALOG)
                        intel_crt_reset(&encoder->base);
        }

        intel_vga_redisable_power_on(dev_priv);

        intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_disable_display_irqs(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        /* make sure we're done processing display irqs */
        intel_synchronize_irq(dev_priv);

        intel_power_sequencer_reset(dev_priv);

        /* Prevent us from re-enabling polling by accident in late suspend */
        if (!dev_priv->drm.dev->power.is_suspended)
                intel_hpd_poll_enable(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, true);

        vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        vlv_display_power_well_deinit(dev_priv);

        vlv_set_power_well(dev_priv, power_well, false);
}
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        /* since ref/cri clock was enabled */
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

        vlv_set_power_well(dev_priv, power_well, true);

        /*
         * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
         * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
         *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
         *    b. The other bits such as sfr settings / modesel may all
         *       be set to 0.
         *
         * This should only be done on init and resume from S3 with
         * both PLLs disabled, or we risk losing DPIO and PLL
         * synchronization.
         */
        intel_de_write(dev_priv, DPIO_CTL,
                       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe)
                assert_pll_disabled(dev_priv, pipe);

        /* Assert common reset */
        intel_de_write(dev_priv, DPIO_CTL,
                       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);

        vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
        struct i915_power_well *cmn_bc =
                lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
        struct i915_power_well *cmn_d =
                lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
        u32 phy_control = dev_priv->chv_phy_control;
        u32 phy_status = 0;
        u32 phy_status_mask = 0xffffffff;

        /*
         * The BIOS can leave the PHY in some weird state
         * where it doesn't fully power down some parts.
         * Disable the asserts until the PHY has been fully
         * reset (ie. the power well has been disabled at
         * least once).
         */
        if (!dev_priv->chv_phy_assert[DPIO_PHY0])
                phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
                                     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

        if (!dev_priv->chv_phy_assert[DPIO_PHY1])
                phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

        if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY0);

                /* this assumes override is only used to enable lanes */
                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

                /* CL1 is on whenever anything is on in either channel */
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

                /*
                 * The DPLLB check accounts for the pipe B + port A usage
                 * with CL2 powered up but all the lanes in the second channel
                 * powered down.
                 */
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
                    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
        }

        if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY1);

                /* this assumes override is only used to enable lanes */
                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
        }

        phy_status &= phy_status_mask;

        /*
         * The PHY may be busy with some initial calibration and whatnot,
         * so the power state can take a while to actually change.
         */
        if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
                                       phy_status_mask, phy_status, 10))
                drm_err(&dev_priv->drm,
                        "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
                        intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
                        phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        enum dpio_phy phy;
        enum pipe pipe;
        u32 tmp;

        drm_WARN_ON_ONCE(&dev_priv->drm,
                         power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
                         power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                pipe = PIPE_A;
                phy = DPIO_PHY0;
        } else {
                pipe = PIPE_C;
                phy = DPIO_PHY1;
        }

        /* since ref/cri clock was enabled */
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
        vlv_set_power_well(dev_priv, power_well, true);

        /* Poll for phypwrgood signal */
        if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
                                  PHY_POWERGOOD(phy), 1))
                drm_err(&dev_priv->drm, "Display PHY %d is not powered up\n",
                        phy);

        vlv_dpio_get(dev_priv);

        /* Enable dynamic power down */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
        tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
               DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
                tmp |= DPIO_DYNPWRDOWNEN_CH1;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
        } else {
                /*
                 * Force the non-existing CL2 off. BXT does this
                 * too, so maybe it saves some power even though
                 * CL2 doesn't exist?
                 */
                tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
                tmp |= DPIO_CL2_LDOFUSE_PWRENB;
                vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
        }

        vlv_dpio_put(dev_priv);

        dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
        intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
                       dev_priv->chv_phy_control);

        drm_dbg_kms(&dev_priv->drm,
                    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
                    phy, dev_priv->chv_phy_control);

        assert_chv_phy_status(dev_priv);
}
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        enum dpio_phy phy;

        drm_WARN_ON_ONCE(&dev_priv->drm,
                         power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
                         power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                phy = DPIO_PHY0;
                assert_pll_disabled(dev_priv, PIPE_A);
                assert_pll_disabled(dev_priv, PIPE_B);
        } else {
                phy = DPIO_PHY1;
                assert_pll_disabled(dev_priv, PIPE_C);
        }

        dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
        intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
                       dev_priv->chv_phy_control);

        vlv_set_power_well(dev_priv, power_well, false);

        drm_dbg_kms(&dev_priv->drm,
                    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
                    phy, dev_priv->chv_phy_control);

        /* PHY is fully reset now, so we can enable the PHY state asserts */
        dev_priv->chv_phy_assert[phy] = true;

        assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
                                     enum dpio_channel ch, bool override, unsigned int mask)
{
        enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
        u32 reg, val, expected, actual;

        /*
         * The BIOS can leave the PHY in some weird state
         * where it doesn't fully power down some parts.
         * Disable the asserts until the PHY has been fully
         * reset (ie. the power well has been disabled at
         * least once).
         */
        if (!dev_priv->chv_phy_assert[phy])
                return;

        if (ch == DPIO_CH0)
                reg = _CHV_CMN_DW0_CH0;
        else
                reg = _CHV_CMN_DW6_CH1;

        vlv_dpio_get(dev_priv);
        val = vlv_dpio_read(dev_priv, pipe, reg);
        vlv_dpio_put(dev_priv);

        /*
         * This assumes !override is only used when the port is disabled.
         * All lanes should power down even without the override when
         * the port is disabled.
         */
        if (!override || mask == 0xf) {
                expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
                /*
                 * If CH1 common lane is not active anymore
                 * (eg. for pipe B DPLL) the entire channel will
                 * shut down, which causes the common lane registers
                 * to read as 0. That means we can't actually check
                 * the lane power down status bits, but as the entire
                 * register reads as 0 it's a good indication that the
                 * channel is indeed entirely powered down.
                 */
                if (ch == DPIO_CH1 && val == 0)
                        expected = 0;
        } else if (mask != 0x0) {
                expected = DPIO_ANYDL_POWERDOWN;
        } else {
                expected = 0;
        }

        if (ch == DPIO_CH0)
                actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
        else
                actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
        actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

        drm_WARN(&dev_priv->drm, actual != expected,
                 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
                 !!(actual & DPIO_ALLDL_POWERDOWN),
                 !!(actual & DPIO_ANYDL_POWERDOWN),
                 !!(expected & DPIO_ALLDL_POWERDOWN),
                 !!(expected & DPIO_ANYDL_POWERDOWN),
                 reg, val);
}
1759 */ 1760 if (ch == DPIO_CH1 && val == 0) 1761 expected = 0; 1762 } else if (mask != 0x0) { 1763 expected = DPIO_ANYDL_POWERDOWN; 1764 } else { 1765 expected = 0; 1766 } 1767 1768 if (ch == DPIO_CH0) 1769 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; 1770 else 1771 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; 1772 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1773 1774 drm_WARN(&dev_priv->drm, actual != expected, 1775 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", 1776 !!(actual & DPIO_ALLDL_POWERDOWN), 1777 !!(actual & DPIO_ANYDL_POWERDOWN), 1778 !!(expected & DPIO_ALLDL_POWERDOWN), 1779 !!(expected & DPIO_ANYDL_POWERDOWN), 1780 reg, val); 1781 } 1782 1783 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1784 enum dpio_channel ch, bool override) 1785 { 1786 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1787 bool was_override; 1788 1789 mutex_lock(&power_domains->lock); 1790 1791 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1792 1793 if (override == was_override) 1794 goto out; 1795 1796 if (override) 1797 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1798 else 1799 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1800 1801 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1802 dev_priv->chv_phy_control); 1803 1804 drm_dbg_kms(&dev_priv->drm, 1805 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", 1806 phy, ch, dev_priv->chv_phy_control); 1807 1808 assert_chv_phy_status(dev_priv); 1809 1810 out: 1811 mutex_unlock(&power_domains->lock); 1812 1813 return was_override; 1814 } 1815 1816 void chv_phy_powergate_lanes(struct intel_encoder *encoder, 1817 bool override, unsigned int mask) 1818 { 1819 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1820 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1821 enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); 1822 enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); 1823 1824 mutex_lock(&power_domains->lock); 1825 1826 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); 1827 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); 1828 1829 if (override) 1830 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1831 else 1832 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1833 1834 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1835 dev_priv->chv_phy_control); 1836 1837 drm_dbg_kms(&dev_priv->drm, 1838 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", 1839 phy, ch, mask, dev_priv->chv_phy_control); 1840 1841 assert_chv_phy_status(dev_priv); 1842 1843 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); 1844 1845 mutex_unlock(&power_domains->lock); 1846 } 1847 1848 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, 1849 struct i915_power_well *power_well) 1850 { 1851 enum pipe pipe = PIPE_A; 1852 bool enabled; 1853 u32 state, ctrl; 1854 1855 vlv_punit_get(dev_priv); 1856 1857 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); 1858 /* 1859 * We only ever set the power-on and power-gate states, anything 1860 * else is unexpected. 
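 *
 * (The DP_SSS status field sits 16 bits above the corresponding
 * DP_SSC control field, so once a transition has completed the two
 * should agree - which is what the ctrl << 16 check below verifies.)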
1861 */ 1862 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && 1863 state != DP_SSS_PWR_GATE(pipe)); 1864 enabled = state == DP_SSS_PWR_ON(pipe); 1865 1866 /* 1867 * A transient state at this point would mean some unexpected party 1868 * is poking at the power controls too. 1869 */ 1870 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); 1871 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); 1872 1873 vlv_punit_put(dev_priv); 1874 1875 return enabled; 1876 } 1877 1878 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, 1879 struct i915_power_well *power_well, 1880 bool enable) 1881 { 1882 enum pipe pipe = PIPE_A; 1883 u32 state; 1884 u32 ctrl; 1885 1886 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); 1887 1888 vlv_punit_get(dev_priv); 1889 1890 #define COND \ 1891 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) 1892 1893 if (COND) 1894 goto out; 1895 1896 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 1897 ctrl &= ~DP_SSC_MASK(pipe); 1898 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); 1899 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); 1900 1901 if (wait_for(COND, 100)) 1902 drm_err(&dev_priv->drm, 1903 "timeout setting power well state %08x (%08x)\n", 1904 state, 1905 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); 1906 1907 #undef COND 1908 1909 out: 1910 vlv_punit_put(dev_priv); 1911 } 1912 1913 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, 1914 struct i915_power_well *power_well) 1915 { 1916 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1917 dev_priv->chv_phy_control); 1918 } 1919 1920 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, 1921 struct i915_power_well *power_well) 1922 { 1923 chv_set_pipe_power_well(dev_priv, power_well, true); 1924 1925 vlv_display_power_well_init(dev_priv); 1926 } 1927 1928 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, 1929 struct i915_power_well *power_well) 1930 { 1931 vlv_display_power_well_deinit(dev_priv); 1932 1933 chv_set_pipe_power_well(dev_priv, power_well, false); 1934 } 1935 1936 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) 1937 { 1938 return power_domains->async_put_domains[0] | 1939 power_domains->async_put_domains[1]; 1940 } 1941 1942 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 1943 1944 static bool 1945 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 1946 { 1947 struct drm_i915_private *i915 = container_of(power_domains, 1948 struct drm_i915_private, 1949 power_domains); 1950 return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] & 1951 power_domains->async_put_domains[1]); 1952 } 1953 1954 static bool 1955 __async_put_domains_state_ok(struct i915_power_domains *power_domains) 1956 { 1957 struct drm_i915_private *i915 = container_of(power_domains, 1958 struct drm_i915_private, 1959 power_domains); 1960 enum intel_display_power_domain domain; 1961 bool err = false; 1962 1963 err |= !assert_async_put_domain_masks_disjoint(power_domains); 1964 err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref != 1965 !!__async_put_domains_mask(power_domains)); 1966 1967 for_each_power_domain(domain, __async_put_domains_mask(power_domains)) 1968 err |= drm_WARN_ON(&i915->drm, 1969 power_domains->domain_use_count[domain] != 1); 1970 1971 return !err; 1972 } 1973 1974 static void print_power_domains(struct i915_power_domains *power_domains, 1975 const char *prefix, u64 
mask) 1976 { 1977 struct drm_i915_private *i915 = container_of(power_domains, 1978 struct drm_i915_private, 1979 power_domains); 1980 enum intel_display_power_domain domain; 1981 1982 drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask)); 1983 for_each_power_domain(domain, mask) 1984 drm_dbg(&i915->drm, "%s use_count %d\n", 1985 intel_display_power_domain_str(domain), 1986 power_domains->domain_use_count[domain]); 1987 } 1988 1989 static void 1990 print_async_put_domains_state(struct i915_power_domains *power_domains) 1991 { 1992 struct drm_i915_private *i915 = container_of(power_domains, 1993 struct drm_i915_private, 1994 power_domains); 1995 1996 drm_dbg(&i915->drm, "async_put_wakeref %u\n", 1997 power_domains->async_put_wakeref); 1998 1999 print_power_domains(power_domains, "async_put_domains[0]", 2000 power_domains->async_put_domains[0]); 2001 print_power_domains(power_domains, "async_put_domains[1]", 2002 power_domains->async_put_domains[1]); 2003 } 2004 2005 static void 2006 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2007 { 2008 if (!__async_put_domains_state_ok(power_domains)) 2009 print_async_put_domains_state(power_domains); 2010 } 2011 2012 #else 2013 2014 static void 2015 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 2016 { 2017 } 2018 2019 static void 2020 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2021 { 2022 } 2023 2024 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ 2025 2026 static u64 async_put_domains_mask(struct i915_power_domains *power_domains) 2027 { 2028 assert_async_put_domain_masks_disjoint(power_domains); 2029 2030 return __async_put_domains_mask(power_domains); 2031 } 2032 2033 static void 2034 async_put_domains_clear_domain(struct i915_power_domains *power_domains, 2035 enum intel_display_power_domain domain) 2036 { 2037 assert_async_put_domain_masks_disjoint(power_domains); 2038 2039 power_domains->async_put_domains[0] &= ~BIT_ULL(domain); 2040 power_domains->async_put_domains[1] &= ~BIT_ULL(domain); 2041 } 2042 2043 static bool 2044 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, 2045 enum intel_display_power_domain domain) 2046 { 2047 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2048 bool ret = false; 2049 2050 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) 2051 goto out_verify; 2052 2053 async_put_domains_clear_domain(power_domains, domain); 2054 2055 ret = true; 2056 2057 if (async_put_domains_mask(power_domains)) 2058 goto out_verify; 2059 2060 cancel_delayed_work(&power_domains->async_put_work); 2061 intel_runtime_pm_put_raw(&dev_priv->runtime_pm, 2062 fetch_and_zero(&power_domains->async_put_wakeref)); 2063 out_verify: 2064 verify_async_put_domains_state(power_domains); 2065 2066 return ret; 2067 } 2068 2069 static void 2070 __intel_display_power_get_domain(struct drm_i915_private *dev_priv, 2071 enum intel_display_power_domain domain) 2072 { 2073 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2074 struct i915_power_well *power_well; 2075 2076 if (intel_display_power_grab_async_put_ref(dev_priv, domain)) 2077 return; 2078 2079 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) 2080 intel_power_well_get(dev_priv, power_well); 2081 2082 power_domains->domain_use_count[domain]++; 2083 } 2084 2085 /** 2086 * intel_display_power_get - grab a power domain reference 2087 * @dev_priv: i915 device instance 2088 * @domain: power domain to reference 2089 * 2090 * 
This function grabs a power domain reference for @domain and ensures that the 2091 * power domain and all its parents are powered up. Therefore users should only 2092 * grab a reference to the innermost power domain they need. 2093 * 2094 * Any power domain reference obtained by this function must have a symmetric 2095 * call to intel_display_power_put() to release the reference again. 2096 */ 2097 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, 2098 enum intel_display_power_domain domain) 2099 { 2100 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2101 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 2102 2103 mutex_lock(&power_domains->lock); 2104 __intel_display_power_get_domain(dev_priv, domain); 2105 mutex_unlock(&power_domains->lock); 2106 2107 return wakeref; 2108 } 2109 2110 /** 2111 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain 2112 * @dev_priv: i915 device instance 2113 * @domain: power domain to reference 2114 * 2115 * This function grabs a power domain reference for @domain only if the 2116 * power domain is already enabled; in that case it also ensures that the domain 2117 * and all its parents stay powered up for as long as the reference is held. Unlike intel_display_power_get(), it never powers up a disabled domain. 2118 * 2119 * Any power domain reference obtained by this function must have a symmetric 2120 * call to intel_display_power_put() to release the reference again. Returns a wakeref cookie when the reference was taken, or 0 if the domain was not enabled. 2121 */ 2122 intel_wakeref_t 2123 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, 2124 enum intel_display_power_domain domain) 2125 { 2126 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2127 intel_wakeref_t wakeref; 2128 bool is_enabled; 2129 2130 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); 2131 if (!wakeref) 2132 return 0; 2133 2134 mutex_lock(&power_domains->lock); 2135 2136 if (__intel_display_power_is_enabled(dev_priv, domain)) { 2137 __intel_display_power_get_domain(dev_priv, domain); 2138 is_enabled = true; 2139 } else { 2140 is_enabled = false; 2141 } 2142 2143 mutex_unlock(&power_domains->lock); 2144 2145 if (!is_enabled) { 2146 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2147 wakeref = 0; 2148 } 2149 2150 return wakeref; 2151 } 2152 2153 static void 2154 __intel_display_power_put_domain(struct drm_i915_private *dev_priv, 2155 enum intel_display_power_domain domain) 2156 { 2157 struct i915_power_domains *power_domains; 2158 struct i915_power_well *power_well; 2159 const char *name = intel_display_power_domain_str(domain); 2160 2161 power_domains = &dev_priv->power_domains; 2162 2163 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], 2164 "Use count on domain %s is already zero\n", 2165 name); 2166 drm_WARN(&dev_priv->drm, 2167 async_put_domains_mask(power_domains) & BIT_ULL(domain), 2168 "Async disabling of domain %s is pending\n", 2169 name); 2170 2171 power_domains->domain_use_count[domain]--; 2172 2173 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) 2174 intel_power_well_put(dev_priv, power_well); 2175 } 2176 2177 static void __intel_display_power_put(struct drm_i915_private *dev_priv, 2178 enum intel_display_power_domain domain) 2179 { 2180 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2181 2182 mutex_lock(&power_domains->lock); 2183 __intel_display_power_put_domain(dev_priv, domain); 2184 mutex_unlock(&power_domains->lock); 2185 } 2186 2187 /** 2188 * intel_display_power_put_unchecked - release an unchecked power
domain reference 2189 * @dev_priv: i915 device instance 2190 * @domain: power domain to reference 2191 * 2192 * This function drops the power domain reference obtained by 2193 * intel_display_power_get() and might power down the corresponding hardware 2194 * block right away if this is the last reference. 2195 * 2196 * This function exists only for historical reasons and should be avoided in 2197 * new code, as the correctness of its use cannot be checked. Always use 2198 * intel_display_power_put() instead. 2199 */ 2200 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 2201 enum intel_display_power_domain domain) 2202 { 2203 __intel_display_power_put(dev_priv, domain); 2204 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); 2205 } 2206 2207 static void 2208 queue_async_put_domains_work(struct i915_power_domains *power_domains, 2209 intel_wakeref_t wakeref) 2210 { 2211 struct drm_i915_private *i915 = container_of(power_domains, 2212 struct drm_i915_private, 2213 power_domains); 2214 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); 2215 power_domains->async_put_wakeref = wakeref; 2216 drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, 2217 &power_domains->async_put_work, 2218 msecs_to_jiffies(100))); 2219 } 2220 2221 static void 2222 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) 2223 { 2224 struct drm_i915_private *dev_priv = 2225 container_of(power_domains, struct drm_i915_private, 2226 power_domains); 2227 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 2228 enum intel_display_power_domain domain; 2229 intel_wakeref_t wakeref; 2230 2231 /* 2232 * The caller must already hold a raw wakeref; upgrade that to a proper 2233 * wakeref to make the state checker happy about the HW access during 2234 * power well disabling. 2235 */ 2236 assert_rpm_raw_wakeref_held(rpm); 2237 wakeref = intel_runtime_pm_get(rpm); 2238 2239 for_each_power_domain(domain, mask) { 2240 /* Clear before put, so put's sanity check is happy. */ 2241 async_put_domains_clear_domain(power_domains, domain); 2242 __intel_display_power_put_domain(dev_priv, domain); 2243 } 2244 2245 intel_runtime_pm_put(rpm, wakeref); 2246 } 2247 2248 static void 2249 intel_display_power_put_async_work(struct work_struct *work) 2250 { 2251 struct drm_i915_private *dev_priv = 2252 container_of(work, struct drm_i915_private, 2253 power_domains.async_put_work.work); 2254 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2255 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 2256 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); 2257 intel_wakeref_t old_work_wakeref = 0; 2258 2259 mutex_lock(&power_domains->lock); 2260 2261 /* 2262 * Bail out if all the domain refs pending to be released were grabbed 2263 * by subsequent gets or a flush_work. 2264 */ 2265 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2266 if (!old_work_wakeref) 2267 goto out_verify; 2268 2269 release_async_put_domains(power_domains, 2270 power_domains->async_put_domains[0]); 2271 2272 /* Requeue the work if more domains were async put meanwhile.
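 * (async_put_domains[0] holds the domains the queued work will
 * release; puts that arrive while a work is already queued accumulate
 * in async_put_domains[1] and are promoted to [0] here before the
 * work is requeued.)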
*/ 2273 if (power_domains->async_put_domains[1]) { 2274 power_domains->async_put_domains[0] = 2275 fetch_and_zero(&power_domains->async_put_domains[1]); 2276 queue_async_put_domains_work(power_domains, 2277 fetch_and_zero(&new_work_wakeref)); 2278 } 2279 2280 out_verify: 2281 verify_async_put_domains_state(power_domains); 2282 2283 mutex_unlock(&power_domains->lock); 2284 2285 if (old_work_wakeref) 2286 intel_runtime_pm_put_raw(rpm, old_work_wakeref); 2287 if (new_work_wakeref) 2288 intel_runtime_pm_put_raw(rpm, new_work_wakeref); 2289 } 2290 2291 /** 2292 * intel_display_power_put_async - release a power domain reference asynchronously 2293 * @i915: i915 device instance 2294 * @domain: power domain to reference 2295 * @wakeref: wakeref acquired for the reference that is being released 2296 * 2297 * This function drops the power domain reference obtained by 2298 * intel_display_power_get*() and schedules delayed work to power down the 2299 * corresponding hardware block if this is the last reference. 2300 */ 2301 void __intel_display_power_put_async(struct drm_i915_private *i915, 2302 enum intel_display_power_domain domain, 2303 intel_wakeref_t wakeref) 2304 { 2305 struct i915_power_domains *power_domains = &i915->power_domains; 2306 struct intel_runtime_pm *rpm = &i915->runtime_pm; 2307 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); 2308 2309 mutex_lock(&power_domains->lock); 2310 2311 if (power_domains->domain_use_count[domain] > 1) { 2312 __intel_display_power_put_domain(i915, domain); 2313 2314 goto out_verify; 2315 } 2316 2317 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); 2318 2319 /* Let a pending work requeue itself or queue a new one. */ 2320 if (power_domains->async_put_wakeref) { 2321 power_domains->async_put_domains[1] |= BIT_ULL(domain); 2322 } else { 2323 power_domains->async_put_domains[0] |= BIT_ULL(domain); 2324 queue_async_put_domains_work(power_domains, 2325 fetch_and_zero(&work_wakeref)); 2326 } 2327 2328 out_verify: 2329 verify_async_put_domains_state(power_domains); 2330 2331 mutex_unlock(&power_domains->lock); 2332 2333 if (work_wakeref) 2334 intel_runtime_pm_put_raw(rpm, work_wakeref); 2335 2336 intel_runtime_pm_put(rpm, wakeref); 2337 } 2338 2339 /** 2340 * intel_display_power_flush_work - flushes the async display power disabling work 2341 * @i915: i915 device instance 2342 * 2343 * Flushes any pending work that was scheduled by a preceding 2344 * intel_display_power_put_async() call, completing the disabling of the 2345 * corresponding power domains. 2346 * 2347 * Note that the work handler function may still be running after this 2348 * function returns; to ensure that the work handler isn't running use 2349 * intel_display_power_flush_work_sync() instead.
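 *
 * A minimal usage sketch (illustrative only; the power domain and the
 * surrounding code are made up for the example):
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
 *	... access the AUX hardware ...
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 *	...
 *	intel_display_power_flush_work(i915);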
2350 */ 2351 void intel_display_power_flush_work(struct drm_i915_private *i915) 2352 { 2353 struct i915_power_domains *power_domains = &i915->power_domains; 2354 intel_wakeref_t work_wakeref; 2355 2356 mutex_lock(&power_domains->lock); 2357 2358 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2359 if (!work_wakeref) 2360 goto out_verify; 2361 2362 release_async_put_domains(power_domains, 2363 async_put_domains_mask(power_domains)); 2364 cancel_delayed_work(&power_domains->async_put_work); 2365 2366 out_verify: 2367 verify_async_put_domains_state(power_domains); 2368 2369 mutex_unlock(&power_domains->lock); 2370 2371 if (work_wakeref) 2372 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); 2373 } 2374 2375 /** 2376 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work 2377 * @i915: i915 device instance 2378 * 2379 * Like intel_display_power_flush_work(), but also ensures that the work 2380 * handler function is no longer running when this function returns. 2381 */ 2382 static void 2383 intel_display_power_flush_work_sync(struct drm_i915_private *i915) 2384 { 2385 struct i915_power_domains *power_domains = &i915->power_domains; 2386 2387 intel_display_power_flush_work(i915); 2388 cancel_delayed_work_sync(&power_domains->async_put_work); 2389 2390 verify_async_put_domains_state(power_domains); 2391 2392 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); 2393 } 2394 2395 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2396 /** 2397 * intel_display_power_put - release a power domain reference 2398 * @dev_priv: i915 device instance 2399 * @domain: power domain to reference 2400 * @wakeref: wakeref acquired for the reference that is being released 2401 * 2402 * This function drops the power domain reference obtained by 2403 * intel_display_power_get() and might power down the corresponding hardware 2404 * block right away if this is the last reference.
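 *
 * A minimal sketch of the expected get/put pairing (illustrative
 * only; the domain is chosen arbitrarily):
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
 *	... touch the hardware behind the domain ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);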
2405 */ 2406 void intel_display_power_put(struct drm_i915_private *dev_priv, 2407 enum intel_display_power_domain domain, 2408 intel_wakeref_t wakeref) 2409 { 2410 __intel_display_power_put(dev_priv, domain); 2411 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2412 } 2413 #endif 2414 2415 #define I830_PIPES_POWER_DOMAINS ( \ 2416 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2417 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2418 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2419 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2420 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2421 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2422 BIT_ULL(POWER_DOMAIN_INIT)) 2423 2424 #define VLV_DISPLAY_POWER_DOMAINS ( \ 2425 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2426 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2427 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2428 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2429 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2430 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2431 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2432 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2433 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2434 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2435 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2436 BIT_ULL(POWER_DOMAIN_VGA) | \ 2437 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2438 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2439 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2440 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2441 BIT_ULL(POWER_DOMAIN_INIT)) 2442 2443 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2444 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2445 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2446 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2447 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2448 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2449 BIT_ULL(POWER_DOMAIN_INIT)) 2450 2451 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ 2452 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2453 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2454 BIT_ULL(POWER_DOMAIN_INIT)) 2455 2456 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ 2457 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2458 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2459 BIT_ULL(POWER_DOMAIN_INIT)) 2460 2461 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ 2462 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2463 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2464 BIT_ULL(POWER_DOMAIN_INIT)) 2465 2466 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ 2467 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2468 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2469 BIT_ULL(POWER_DOMAIN_INIT)) 2470 2471 #define CHV_DISPLAY_POWER_DOMAINS ( \ 2472 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2473 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2474 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2475 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2476 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2477 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2478 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2479 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2480 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2481 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2482 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2483 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2484 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2485 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2486 BIT_ULL(POWER_DOMAIN_VGA) | \ 2487 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2488 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2489 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2490 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2491 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2492 BIT_ULL(POWER_DOMAIN_INIT)) 2493 2494 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2495 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2496 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2497 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2498 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2499 
BIT_ULL(POWER_DOMAIN_INIT)) 2500 2501 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ 2502 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2503 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2504 BIT_ULL(POWER_DOMAIN_INIT)) 2505 2506 #define HSW_DISPLAY_POWER_DOMAINS ( \ 2507 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2508 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2509 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2510 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2511 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2512 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2513 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2514 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2515 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2516 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2517 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2518 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2519 BIT_ULL(POWER_DOMAIN_VGA) | \ 2520 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2521 BIT_ULL(POWER_DOMAIN_INIT)) 2522 2523 #define BDW_DISPLAY_POWER_DOMAINS ( \ 2524 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2525 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2526 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2527 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2528 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2529 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2530 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2531 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2532 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2533 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2534 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2535 BIT_ULL(POWER_DOMAIN_VGA) | \ 2536 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2537 BIT_ULL(POWER_DOMAIN_INIT)) 2538 2539 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2540 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2541 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2542 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2543 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2544 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2545 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2546 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2547 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2548 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2549 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2550 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2551 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2552 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2553 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2554 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2555 BIT_ULL(POWER_DOMAIN_VGA) | \ 2556 BIT_ULL(POWER_DOMAIN_INIT)) 2557 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ 2558 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2559 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ 2560 BIT_ULL(POWER_DOMAIN_INIT)) 2561 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2562 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2563 BIT_ULL(POWER_DOMAIN_INIT)) 2564 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2565 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2566 BIT_ULL(POWER_DOMAIN_INIT)) 2567 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ 2568 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2569 BIT_ULL(POWER_DOMAIN_INIT)) 2570 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2571 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2572 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2573 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2574 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2575 BIT_ULL(POWER_DOMAIN_INIT)) 2576 2577 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2578 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2579 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2580 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2581 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2582 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2583 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2584 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2585 
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2586 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2587 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2588 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2589 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2590 BIT_ULL(POWER_DOMAIN_VGA) | \ 2591 BIT_ULL(POWER_DOMAIN_INIT)) 2592 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2593 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2594 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2595 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2596 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2597 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2598 BIT_ULL(POWER_DOMAIN_INIT)) 2599 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ 2600 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2601 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2602 BIT_ULL(POWER_DOMAIN_INIT)) 2603 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ 2604 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2605 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2606 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2607 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2608 BIT_ULL(POWER_DOMAIN_INIT)) 2609 2610 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2611 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2612 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2613 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2614 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2615 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2616 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2617 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2618 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2619 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2620 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2621 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2622 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2623 BIT_ULL(POWER_DOMAIN_VGA) | \ 2624 BIT_ULL(POWER_DOMAIN_INIT)) 2625 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ 2626 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2627 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2628 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2629 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2630 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2631 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ 2632 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2633 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2634 BIT_ULL(POWER_DOMAIN_INIT)) 2635 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ 2636 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2637 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2638 BIT_ULL(POWER_DOMAIN_INIT)) 2639 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ 2640 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2641 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2642 BIT_ULL(POWER_DOMAIN_INIT)) 2643 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2644 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2645 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2646 BIT_ULL(POWER_DOMAIN_INIT)) 2647 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2648 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2649 BIT_ULL(POWER_DOMAIN_INIT)) 2650 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2651 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2652 BIT_ULL(POWER_DOMAIN_INIT)) 2653 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2654 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2655 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2656 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2657 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2658 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2659 BIT_ULL(POWER_DOMAIN_INIT)) 2660 2661 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2662 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2663 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2664 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2665 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2666 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2667 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2668 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2669 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2670 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2671 
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2672 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2673 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2674 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2675 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2676 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2677 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2678 BIT_ULL(POWER_DOMAIN_VGA) | \ 2679 BIT_ULL(POWER_DOMAIN_INIT)) 2680 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \ 2681 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2682 BIT_ULL(POWER_DOMAIN_INIT)) 2683 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \ 2684 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2685 BIT_ULL(POWER_DOMAIN_INIT)) 2686 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \ 2687 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2688 BIT_ULL(POWER_DOMAIN_INIT)) 2689 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \ 2690 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2691 BIT_ULL(POWER_DOMAIN_INIT)) 2692 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2693 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2694 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2695 BIT_ULL(POWER_DOMAIN_INIT)) 2696 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2697 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2698 BIT_ULL(POWER_DOMAIN_INIT)) 2699 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2700 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2701 BIT_ULL(POWER_DOMAIN_INIT)) 2702 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \ 2703 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2704 BIT_ULL(POWER_DOMAIN_INIT)) 2705 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \ 2706 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2707 BIT_ULL(POWER_DOMAIN_INIT)) 2708 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \ 2709 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \ 2710 BIT_ULL(POWER_DOMAIN_INIT)) 2711 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2712 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2713 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2714 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2715 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2716 BIT_ULL(POWER_DOMAIN_INIT)) 2717 2718 /* 2719 * ICL PW_0/PG_0 domains (HW/DMC control): 2720 * - PCI 2721 * - clocks except port PLL 2722 * - central power except FBC 2723 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers 2724 * ICL PW_1/PG_1 domains (HW/DMC control): 2725 * - DBUF function 2726 * - PIPE_A and its planes, except VGA 2727 * - transcoder EDP + PSR 2728 * - transcoder DSI 2729 * - DDI_A 2730 * - FBC 2731 */ 2732 #define ICL_PW_4_POWER_DOMAINS ( \ 2733 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2734 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2735 BIT_ULL(POWER_DOMAIN_INIT)) 2736 /* VDSC/joining */ 2737 #define ICL_PW_3_POWER_DOMAINS ( \ 2738 ICL_PW_4_POWER_DOMAINS | \ 2739 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2740 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2741 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2742 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2743 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2744 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2745 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2746 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2747 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2748 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2749 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2750 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2751 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2752 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2753 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2754 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ 2755 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2756 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2757 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2758 BIT_ULL(POWER_DOMAIN_VGA) | \ 2759 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2760 BIT_ULL(POWER_DOMAIN_INIT)) 2761 /* 2762 * - transcoder WD 2763 * - KVMR (HW control) 2764 */ 2765 #define 
ICL_PW_2_POWER_DOMAINS ( \ 2766 ICL_PW_3_POWER_DOMAINS | \ 2767 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2768 BIT_ULL(POWER_DOMAIN_INIT)) 2769 /* 2770 * - KVMR (HW control) 2771 */ 2772 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2773 ICL_PW_2_POWER_DOMAINS | \ 2774 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2775 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2776 BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \ 2777 BIT_ULL(POWER_DOMAIN_INIT)) 2778 2779 #define ICL_DDI_IO_A_POWER_DOMAINS ( \ 2780 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2781 #define ICL_DDI_IO_B_POWER_DOMAINS ( \ 2782 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2783 #define ICL_DDI_IO_C_POWER_DOMAINS ( \ 2784 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2785 #define ICL_DDI_IO_D_POWER_DOMAINS ( \ 2786 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2787 #define ICL_DDI_IO_E_POWER_DOMAINS ( \ 2788 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2789 #define ICL_DDI_IO_F_POWER_DOMAINS ( \ 2790 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2791 2792 #define ICL_AUX_A_IO_POWER_DOMAINS ( \ 2793 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2794 BIT_ULL(POWER_DOMAIN_AUX_A)) 2795 #define ICL_AUX_B_IO_POWER_DOMAINS ( \ 2796 BIT_ULL(POWER_DOMAIN_AUX_B)) 2797 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ 2798 BIT_ULL(POWER_DOMAIN_AUX_C)) 2799 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ 2800 BIT_ULL(POWER_DOMAIN_AUX_D)) 2801 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ 2802 BIT_ULL(POWER_DOMAIN_AUX_E)) 2803 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ 2804 BIT_ULL(POWER_DOMAIN_AUX_F)) 2805 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ 2806 BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) 2807 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ 2808 BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) 2809 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ 2810 BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) 2811 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ 2812 BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) 2813 2814 #define TGL_PW_5_POWER_DOMAINS ( \ 2815 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 2816 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 2817 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 2818 BIT_ULL(POWER_DOMAIN_INIT)) 2819 2820 #define TGL_PW_4_POWER_DOMAINS ( \ 2821 TGL_PW_5_POWER_DOMAINS | \ 2822 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2823 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2824 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2825 BIT_ULL(POWER_DOMAIN_INIT)) 2826 2827 #define TGL_PW_3_POWER_DOMAINS ( \ 2828 TGL_PW_4_POWER_DOMAINS | \ 2829 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2830 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2831 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2832 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2833 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2834 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2835 BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \ 2836 BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \ 2837 BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \ 2838 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2839 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2840 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2841 BIT_ULL(POWER_DOMAIN_AUX_G) | \ 2842 BIT_ULL(POWER_DOMAIN_AUX_H) | \ 2843 BIT_ULL(POWER_DOMAIN_AUX_I) | \ 2844 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2845 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2846 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2847 BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \ 2848 BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \ 2849 BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \ 2850 BIT_ULL(POWER_DOMAIN_VGA) | \ 2851 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2852 BIT_ULL(POWER_DOMAIN_INIT)) 2853 2854 #define TGL_PW_2_POWER_DOMAINS ( \ 2855 TGL_PW_3_POWER_DOMAINS | \ 2856 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2857 BIT_ULL(POWER_DOMAIN_INIT)) 2858 2859 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS 
( \ 2860 TGL_PW_3_POWER_DOMAINS | \ 2861 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2862 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2863 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2864 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2865 BIT_ULL(POWER_DOMAIN_INIT)) 2866 2867 #define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \ 2868 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2869 #define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \ 2870 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2871 #define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \ 2872 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2873 #define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \ 2874 BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO)) 2875 #define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \ 2876 BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO)) 2877 #define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \ 2878 BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO)) 2879 2880 #define TGL_AUX_A_IO_POWER_DOMAINS ( \ 2881 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2882 BIT_ULL(POWER_DOMAIN_AUX_A)) 2883 #define TGL_AUX_B_IO_POWER_DOMAINS ( \ 2884 BIT_ULL(POWER_DOMAIN_AUX_B)) 2885 #define TGL_AUX_C_IO_POWER_DOMAINS ( \ 2886 BIT_ULL(POWER_DOMAIN_AUX_C)) 2887 #define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \ 2888 BIT_ULL(POWER_DOMAIN_AUX_D)) 2889 #define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \ 2890 BIT_ULL(POWER_DOMAIN_AUX_E)) 2891 #define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \ 2892 BIT_ULL(POWER_DOMAIN_AUX_F)) 2893 #define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \ 2894 BIT_ULL(POWER_DOMAIN_AUX_G)) 2895 #define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \ 2896 BIT_ULL(POWER_DOMAIN_AUX_H)) 2897 #define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \ 2898 BIT_ULL(POWER_DOMAIN_AUX_I)) 2899 #define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \ 2900 BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) 2901 #define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \ 2902 BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) 2903 #define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \ 2904 BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) 2905 #define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \ 2906 BIT_ULL(POWER_DOMAIN_AUX_G_TBT)) 2907 #define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \ 2908 BIT_ULL(POWER_DOMAIN_AUX_H_TBT)) 2909 #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \ 2910 BIT_ULL(POWER_DOMAIN_AUX_I_TBT)) 2911 2912 #define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ 2913 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2914 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2915 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2916 BIT_ULL(POWER_DOMAIN_AUX_G) | \ 2917 BIT_ULL(POWER_DOMAIN_AUX_H) | \ 2918 BIT_ULL(POWER_DOMAIN_AUX_I) | \ 2919 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2920 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2921 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2922 BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \ 2923 BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \ 2924 BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \ 2925 BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) 2926 2927 #define RKL_PW_4_POWER_DOMAINS ( \ 2928 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2929 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2930 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2931 BIT_ULL(POWER_DOMAIN_INIT)) 2932 2933 #define RKL_PW_3_POWER_DOMAINS ( \ 2934 RKL_PW_4_POWER_DOMAINS | \ 2935 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2936 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2937 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2938 BIT_ULL(POWER_DOMAIN_VGA) | \ 2939 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2940 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2941 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2942 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2943 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2944 BIT_ULL(POWER_DOMAIN_INIT)) 2945 2946 /* 2947 * There is no PW_2/PG_2 on RKL. 
2948 * 2949 * RKL PW_1/PG_1 domains (under HW/DMC control): 2950 * - DBUF function (note: registers are in PW0) 2951 * - PIPE_A and its planes and VDSC/joining, except VGA 2952 * - transcoder A 2953 * - DDI_A and DDI_B 2954 * - FBC 2955 * 2956 * RKL PW_0/PG_0 domains (under HW/DMC control): 2957 * - PCI 2958 * - clocks except port PLL 2959 * - shared functions: 2960 * * interrupts except pipe interrupts 2961 * * MBus except PIPE_MBUS_DBOX_CTL 2962 * * DBUF registers 2963 * - central power except FBC 2964 * - top-level GTC (DDI-level GTC is in the well associated with the DDI) 2965 */ 2966 2967 #define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2968 RKL_PW_3_POWER_DOMAINS | \ 2969 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2970 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2971 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2972 BIT_ULL(POWER_DOMAIN_INIT)) 2973 2974 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 2975 .sync_hw = i9xx_power_well_sync_hw_noop, 2976 .enable = i9xx_always_on_power_well_noop, 2977 .disable = i9xx_always_on_power_well_noop, 2978 .is_enabled = i9xx_always_on_power_well_enabled, 2979 }; 2980 2981 static const struct i915_power_well_ops chv_pipe_power_well_ops = { 2982 .sync_hw = chv_pipe_power_well_sync_hw, 2983 .enable = chv_pipe_power_well_enable, 2984 .disable = chv_pipe_power_well_disable, 2985 .is_enabled = chv_pipe_power_well_enabled, 2986 }; 2987 2988 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { 2989 .sync_hw = i9xx_power_well_sync_hw_noop, 2990 .enable = chv_dpio_cmn_power_well_enable, 2991 .disable = chv_dpio_cmn_power_well_disable, 2992 .is_enabled = vlv_power_well_enabled, 2993 }; 2994 2995 static const struct i915_power_well_desc i9xx_always_on_power_well[] = { 2996 { 2997 .name = "always-on", 2998 .always_on = true, 2999 .domains = POWER_DOMAIN_MASK, 3000 .ops = &i9xx_always_on_power_well_ops, 3001 .id = DISP_PW_ID_NONE, 3002 }, 3003 }; 3004 3005 static const struct i915_power_well_ops i830_pipes_power_well_ops = { 3006 .sync_hw = i830_pipes_power_well_sync_hw, 3007 .enable = i830_pipes_power_well_enable, 3008 .disable = i830_pipes_power_well_disable, 3009 .is_enabled = i830_pipes_power_well_enabled, 3010 }; 3011 3012 static const struct i915_power_well_desc i830_power_wells[] = { 3013 { 3014 .name = "always-on", 3015 .always_on = true, 3016 .domains = POWER_DOMAIN_MASK, 3017 .ops = &i9xx_always_on_power_well_ops, 3018 .id = DISP_PW_ID_NONE, 3019 }, 3020 { 3021 .name = "pipes", 3022 .domains = I830_PIPES_POWER_DOMAINS, 3023 .ops = &i830_pipes_power_well_ops, 3024 .id = DISP_PW_ID_NONE, 3025 }, 3026 }; 3027 3028 static const struct i915_power_well_ops hsw_power_well_ops = { 3029 .sync_hw = hsw_power_well_sync_hw, 3030 .enable = hsw_power_well_enable, 3031 .disable = hsw_power_well_disable, 3032 .is_enabled = hsw_power_well_enabled, 3033 }; 3034 3035 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { 3036 .sync_hw = i9xx_power_well_sync_hw_noop, 3037 .enable = gen9_dc_off_power_well_enable, 3038 .disable = gen9_dc_off_power_well_disable, 3039 .is_enabled = gen9_dc_off_power_well_enabled, 3040 }; 3041 3042 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { 3043 .sync_hw = i9xx_power_well_sync_hw_noop, 3044 .enable = bxt_dpio_cmn_power_well_enable, 3045 .disable = bxt_dpio_cmn_power_well_disable, 3046 .is_enabled = bxt_dpio_cmn_power_well_enabled, 3047 }; 3048 3049 static const struct i915_power_well_regs hsw_power_well_regs = { 3050 .bios = HSW_PWR_WELL_CTL1, 3051 .driver = HSW_PWR_WELL_CTL2, 3052 .kvmr = 
HSW_PWR_WELL_CTL3, 3053 .debug = HSW_PWR_WELL_CTL4, 3054 }; 3055 3056 static const struct i915_power_well_desc hsw_power_wells[] = { 3057 { 3058 .name = "always-on", 3059 .always_on = true, 3060 .domains = POWER_DOMAIN_MASK, 3061 .ops = &i9xx_always_on_power_well_ops, 3062 .id = DISP_PW_ID_NONE, 3063 }, 3064 { 3065 .name = "display", 3066 .domains = HSW_DISPLAY_POWER_DOMAINS, 3067 .ops = &hsw_power_well_ops, 3068 .id = HSW_DISP_PW_GLOBAL, 3069 { 3070 .hsw.regs = &hsw_power_well_regs, 3071 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3072 .hsw.has_vga = true, 3073 }, 3074 }, 3075 }; 3076 3077 static const struct i915_power_well_desc bdw_power_wells[] = { 3078 { 3079 .name = "always-on", 3080 .always_on = true, 3081 .domains = POWER_DOMAIN_MASK, 3082 .ops = &i9xx_always_on_power_well_ops, 3083 .id = DISP_PW_ID_NONE, 3084 }, 3085 { 3086 .name = "display", 3087 .domains = BDW_DISPLAY_POWER_DOMAINS, 3088 .ops = &hsw_power_well_ops, 3089 .id = HSW_DISP_PW_GLOBAL, 3090 { 3091 .hsw.regs = &hsw_power_well_regs, 3092 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3093 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3094 .hsw.has_vga = true, 3095 }, 3096 }, 3097 }; 3098 3099 static const struct i915_power_well_ops vlv_display_power_well_ops = { 3100 .sync_hw = i9xx_power_well_sync_hw_noop, 3101 .enable = vlv_display_power_well_enable, 3102 .disable = vlv_display_power_well_disable, 3103 .is_enabled = vlv_power_well_enabled, 3104 }; 3105 3106 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { 3107 .sync_hw = i9xx_power_well_sync_hw_noop, 3108 .enable = vlv_dpio_cmn_power_well_enable, 3109 .disable = vlv_dpio_cmn_power_well_disable, 3110 .is_enabled = vlv_power_well_enabled, 3111 }; 3112 3113 static const struct i915_power_well_ops vlv_dpio_power_well_ops = { 3114 .sync_hw = i9xx_power_well_sync_hw_noop, 3115 .enable = vlv_power_well_enable, 3116 .disable = vlv_power_well_disable, 3117 .is_enabled = vlv_power_well_enabled, 3118 }; 3119 3120 static const struct i915_power_well_desc vlv_power_wells[] = { 3121 { 3122 .name = "always-on", 3123 .always_on = true, 3124 .domains = POWER_DOMAIN_MASK, 3125 .ops = &i9xx_always_on_power_well_ops, 3126 .id = DISP_PW_ID_NONE, 3127 }, 3128 { 3129 .name = "display", 3130 .domains = VLV_DISPLAY_POWER_DOMAINS, 3131 .ops = &vlv_display_power_well_ops, 3132 .id = VLV_DISP_PW_DISP2D, 3133 { 3134 .vlv.idx = PUNIT_PWGT_IDX_DISP2D, 3135 }, 3136 }, 3137 { 3138 .name = "dpio-tx-b-01", 3139 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3140 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3141 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3142 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3143 .ops = &vlv_dpio_power_well_ops, 3144 .id = DISP_PW_ID_NONE, 3145 { 3146 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, 3147 }, 3148 }, 3149 { 3150 .name = "dpio-tx-b-23", 3151 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3152 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3153 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3154 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3155 .ops = &vlv_dpio_power_well_ops, 3156 .id = DISP_PW_ID_NONE, 3157 { 3158 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, 3159 }, 3160 }, 3161 { 3162 .name = "dpio-tx-c-01", 3163 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3164 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3165 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3166 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3167 .ops = &vlv_dpio_power_well_ops, 3168 .id = DISP_PW_ID_NONE, 3169 { 3170 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, 3171 }, 3172 }, 3173 { 3174 .name = "dpio-tx-c-23", 3175 .domains = 
VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3176 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3177 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3178 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3179 .ops = &vlv_dpio_power_well_ops, 3180 .id = DISP_PW_ID_NONE, 3181 { 3182 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, 3183 }, 3184 }, 3185 { 3186 .name = "dpio-common", 3187 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, 3188 .ops = &vlv_dpio_cmn_power_well_ops, 3189 .id = VLV_DISP_PW_DPIO_CMN_BC, 3190 { 3191 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 3192 }, 3193 }, 3194 }; 3195 3196 static const struct i915_power_well_desc chv_power_wells[] = { 3197 { 3198 .name = "always-on", 3199 .always_on = true, 3200 .domains = POWER_DOMAIN_MASK, 3201 .ops = &i9xx_always_on_power_well_ops, 3202 .id = DISP_PW_ID_NONE, 3203 }, 3204 { 3205 .name = "display", 3206 /* 3207 * Pipe A power well is the new disp2d well. Pipe B and C 3208 * power wells don't actually exist. Pipe A power well is 3209 * required for any pipe to work. 3210 */ 3211 .domains = CHV_DISPLAY_POWER_DOMAINS, 3212 .ops = &chv_pipe_power_well_ops, 3213 .id = DISP_PW_ID_NONE, 3214 }, 3215 { 3216 .name = "dpio-common-bc", 3217 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, 3218 .ops = &chv_dpio_cmn_power_well_ops, 3219 .id = VLV_DISP_PW_DPIO_CMN_BC, 3220 { 3221 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 3222 }, 3223 }, 3224 { 3225 .name = "dpio-common-d", 3226 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, 3227 .ops = &chv_dpio_cmn_power_well_ops, 3228 .id = CHV_DISP_PW_DPIO_CMN_D, 3229 { 3230 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, 3231 }, 3232 }, 3233 }; 3234 3235 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 3236 enum i915_power_well_id power_well_id) 3237 { 3238 struct i915_power_well *power_well; 3239 bool ret; 3240 3241 power_well = lookup_power_well(dev_priv, power_well_id); 3242 ret = power_well->desc->ops->is_enabled(dev_priv, power_well); 3243 3244 return ret; 3245 } 3246 3247 static const struct i915_power_well_desc skl_power_wells[] = { 3248 { 3249 .name = "always-on", 3250 .always_on = true, 3251 .domains = POWER_DOMAIN_MASK, 3252 .ops = &i9xx_always_on_power_well_ops, 3253 .id = DISP_PW_ID_NONE, 3254 }, 3255 { 3256 .name = "power well 1", 3257 /* Handled by the DMC firmware */ 3258 .always_on = true, 3259 .domains = 0, 3260 .ops = &hsw_power_well_ops, 3261 .id = SKL_DISP_PW_1, 3262 { 3263 .hsw.regs = &hsw_power_well_regs, 3264 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3265 .hsw.has_fuses = true, 3266 }, 3267 }, 3268 { 3269 .name = "MISC IO power well", 3270 /* Handled by the DMC firmware */ 3271 .always_on = true, 3272 .domains = 0, 3273 .ops = &hsw_power_well_ops, 3274 .id = SKL_DISP_PW_MISC_IO, 3275 { 3276 .hsw.regs = &hsw_power_well_regs, 3277 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, 3278 }, 3279 }, 3280 { 3281 .name = "DC off", 3282 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, 3283 .ops = &gen9_dc_off_power_well_ops, 3284 .id = SKL_DISP_DC_OFF, 3285 }, 3286 { 3287 .name = "power well 2", 3288 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3289 .ops = &hsw_power_well_ops, 3290 .id = SKL_DISP_PW_2, 3291 { 3292 .hsw.regs = &hsw_power_well_regs, 3293 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3294 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3295 .hsw.has_vga = true, 3296 .hsw.has_fuses = true, 3297 }, 3298 }, 3299 { 3300 .name = "DDI A/E IO power well", 3301 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, 3302 .ops = &hsw_power_well_ops, 3303 .id = DISP_PW_ID_NONE, 3304 { 3305 .hsw.regs = &hsw_power_well_regs, 3306 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, 3307 }, 3308 }, 
3309 { 3310 .name = "DDI B IO power well", 3311 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3312 .ops = &hsw_power_well_ops, 3313 .id = DISP_PW_ID_NONE, 3314 { 3315 .hsw.regs = &hsw_power_well_regs, 3316 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3317 }, 3318 }, 3319 { 3320 .name = "DDI C IO power well", 3321 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3322 .ops = &hsw_power_well_ops, 3323 .id = DISP_PW_ID_NONE, 3324 { 3325 .hsw.regs = &hsw_power_well_regs, 3326 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3327 }, 3328 }, 3329 { 3330 .name = "DDI D IO power well", 3331 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, 3332 .ops = &hsw_power_well_ops, 3333 .id = DISP_PW_ID_NONE, 3334 { 3335 .hsw.regs = &hsw_power_well_regs, 3336 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 3337 }, 3338 }, 3339 }; 3340 3341 static const struct i915_power_well_desc bxt_power_wells[] = { 3342 { 3343 .name = "always-on", 3344 .always_on = true, 3345 .domains = POWER_DOMAIN_MASK, 3346 .ops = &i9xx_always_on_power_well_ops, 3347 .id = DISP_PW_ID_NONE, 3348 }, 3349 { 3350 .name = "power well 1", 3351 /* Handled by the DMC firmware */ 3352 .always_on = true, 3353 .domains = 0, 3354 .ops = &hsw_power_well_ops, 3355 .id = SKL_DISP_PW_1, 3356 { 3357 .hsw.regs = &hsw_power_well_regs, 3358 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3359 .hsw.has_fuses = true, 3360 }, 3361 }, 3362 { 3363 .name = "DC off", 3364 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, 3365 .ops = &gen9_dc_off_power_well_ops, 3366 .id = SKL_DISP_DC_OFF, 3367 }, 3368 { 3369 .name = "power well 2", 3370 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3371 .ops = &hsw_power_well_ops, 3372 .id = SKL_DISP_PW_2, 3373 { 3374 .hsw.regs = &hsw_power_well_regs, 3375 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3376 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3377 .hsw.has_vga = true, 3378 .hsw.has_fuses = true, 3379 }, 3380 }, 3381 { 3382 .name = "dpio-common-a", 3383 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, 3384 .ops = &bxt_dpio_cmn_power_well_ops, 3385 .id = BXT_DISP_PW_DPIO_CMN_A, 3386 { 3387 .bxt.phy = DPIO_PHY1, 3388 }, 3389 }, 3390 { 3391 .name = "dpio-common-bc", 3392 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, 3393 .ops = &bxt_dpio_cmn_power_well_ops, 3394 .id = VLV_DISP_PW_DPIO_CMN_BC, 3395 { 3396 .bxt.phy = DPIO_PHY0, 3397 }, 3398 }, 3399 }; 3400 3401 static const struct i915_power_well_desc glk_power_wells[] = { 3402 { 3403 .name = "always-on", 3404 .always_on = true, 3405 .domains = POWER_DOMAIN_MASK, 3406 .ops = &i9xx_always_on_power_well_ops, 3407 .id = DISP_PW_ID_NONE, 3408 }, 3409 { 3410 .name = "power well 1", 3411 /* Handled by the DMC firmware */ 3412 .always_on = true, 3413 .domains = 0, 3414 .ops = &hsw_power_well_ops, 3415 .id = SKL_DISP_PW_1, 3416 { 3417 .hsw.regs = &hsw_power_well_regs, 3418 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3419 .hsw.has_fuses = true, 3420 }, 3421 }, 3422 { 3423 .name = "DC off", 3424 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, 3425 .ops = &gen9_dc_off_power_well_ops, 3426 .id = SKL_DISP_DC_OFF, 3427 }, 3428 { 3429 .name = "power well 2", 3430 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3431 .ops = &hsw_power_well_ops, 3432 .id = SKL_DISP_PW_2, 3433 { 3434 .hsw.regs = &hsw_power_well_regs, 3435 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3436 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3437 .hsw.has_vga = true, 3438 .hsw.has_fuses = true, 3439 }, 3440 }, 3441 { 3442 .name = "dpio-common-a", 3443 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, 3444 .ops = &bxt_dpio_cmn_power_well_ops, 3445 .id = BXT_DISP_PW_DPIO_CMN_A, 3446 { 3447 .bxt.phy = DPIO_PHY1, 3448 }, 3449 }, 3450 { 3451 
.name = "dpio-common-b", 3452 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, 3453 .ops = &bxt_dpio_cmn_power_well_ops, 3454 .id = VLV_DISP_PW_DPIO_CMN_BC, 3455 { 3456 .bxt.phy = DPIO_PHY0, 3457 }, 3458 }, 3459 { 3460 .name = "dpio-common-c", 3461 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, 3462 .ops = &bxt_dpio_cmn_power_well_ops, 3463 .id = GLK_DISP_PW_DPIO_CMN_C, 3464 { 3465 .bxt.phy = DPIO_PHY2, 3466 }, 3467 }, 3468 { 3469 .name = "AUX A", 3470 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, 3471 .ops = &hsw_power_well_ops, 3472 .id = DISP_PW_ID_NONE, 3473 { 3474 .hsw.regs = &hsw_power_well_regs, 3475 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3476 }, 3477 }, 3478 { 3479 .name = "AUX B", 3480 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, 3481 .ops = &hsw_power_well_ops, 3482 .id = DISP_PW_ID_NONE, 3483 { 3484 .hsw.regs = &hsw_power_well_regs, 3485 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3486 }, 3487 }, 3488 { 3489 .name = "AUX C", 3490 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, 3491 .ops = &hsw_power_well_ops, 3492 .id = DISP_PW_ID_NONE, 3493 { 3494 .hsw.regs = &hsw_power_well_regs, 3495 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3496 }, 3497 }, 3498 { 3499 .name = "DDI A IO power well", 3500 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, 3501 .ops = &hsw_power_well_ops, 3502 .id = DISP_PW_ID_NONE, 3503 { 3504 .hsw.regs = &hsw_power_well_regs, 3505 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3506 }, 3507 }, 3508 { 3509 .name = "DDI B IO power well", 3510 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3511 .ops = &hsw_power_well_ops, 3512 .id = DISP_PW_ID_NONE, 3513 { 3514 .hsw.regs = &hsw_power_well_regs, 3515 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3516 }, 3517 }, 3518 { 3519 .name = "DDI C IO power well", 3520 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3521 .ops = &hsw_power_well_ops, 3522 .id = DISP_PW_ID_NONE, 3523 { 3524 .hsw.regs = &hsw_power_well_regs, 3525 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3526 }, 3527 }, 3528 }; 3529 3530 static const struct i915_power_well_desc cnl_power_wells[] = { 3531 { 3532 .name = "always-on", 3533 .always_on = true, 3534 .domains = POWER_DOMAIN_MASK, 3535 .ops = &i9xx_always_on_power_well_ops, 3536 .id = DISP_PW_ID_NONE, 3537 }, 3538 { 3539 .name = "power well 1", 3540 /* Handled by the DMC firmware */ 3541 .always_on = true, 3542 .domains = 0, 3543 .ops = &hsw_power_well_ops, 3544 .id = SKL_DISP_PW_1, 3545 { 3546 .hsw.regs = &hsw_power_well_regs, 3547 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3548 .hsw.has_fuses = true, 3549 }, 3550 }, 3551 { 3552 .name = "AUX A", 3553 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS, 3554 .ops = &hsw_power_well_ops, 3555 .id = DISP_PW_ID_NONE, 3556 { 3557 .hsw.regs = &hsw_power_well_regs, 3558 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3559 }, 3560 }, 3561 { 3562 .name = "AUX B", 3563 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS, 3564 .ops = &hsw_power_well_ops, 3565 .id = DISP_PW_ID_NONE, 3566 { 3567 .hsw.regs = &hsw_power_well_regs, 3568 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3569 }, 3570 }, 3571 { 3572 .name = "AUX C", 3573 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS, 3574 .ops = &hsw_power_well_ops, 3575 .id = DISP_PW_ID_NONE, 3576 { 3577 .hsw.regs = &hsw_power_well_regs, 3578 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3579 }, 3580 }, 3581 { 3582 .name = "AUX D", 3583 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS, 3584 .ops = &hsw_power_well_ops, 3585 .id = DISP_PW_ID_NONE, 3586 { 3587 .hsw.regs = &hsw_power_well_regs, 3588 .hsw.idx = CNL_PW_CTL_IDX_AUX_D, 3589 }, 3590 }, 3591 { 3592 .name = "DC off", 3593 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, 3594 .ops = &gen9_dc_off_power_well_ops, 3595 .id = 
SKL_DISP_DC_OFF, 3596 }, 3597 { 3598 .name = "power well 2", 3599 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3600 .ops = &hsw_power_well_ops, 3601 .id = SKL_DISP_PW_2, 3602 { 3603 .hsw.regs = &hsw_power_well_regs, 3604 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3605 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3606 .hsw.has_vga = true, 3607 .hsw.has_fuses = true, 3608 }, 3609 }, 3610 { 3611 .name = "DDI A IO power well", 3612 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS, 3613 .ops = &hsw_power_well_ops, 3614 .id = DISP_PW_ID_NONE, 3615 { 3616 .hsw.regs = &hsw_power_well_regs, 3617 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3618 }, 3619 }, 3620 { 3621 .name = "DDI B IO power well", 3622 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS, 3623 .ops = &hsw_power_well_ops, 3624 .id = DISP_PW_ID_NONE, 3625 { 3626 .hsw.regs = &hsw_power_well_regs, 3627 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3628 }, 3629 }, 3630 { 3631 .name = "DDI C IO power well", 3632 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS, 3633 .ops = &hsw_power_well_ops, 3634 .id = DISP_PW_ID_NONE, 3635 { 3636 .hsw.regs = &hsw_power_well_regs, 3637 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3638 }, 3639 }, 3640 { 3641 .name = "DDI D IO power well", 3642 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS, 3643 .ops = &hsw_power_well_ops, 3644 .id = DISP_PW_ID_NONE, 3645 { 3646 .hsw.regs = &hsw_power_well_regs, 3647 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 3648 }, 3649 }, 3650 { 3651 .name = "DDI F IO power well", 3652 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS, 3653 .ops = &hsw_power_well_ops, 3654 .id = CNL_DISP_PW_DDI_F_IO, 3655 { 3656 .hsw.regs = &hsw_power_well_regs, 3657 .hsw.idx = CNL_PW_CTL_IDX_DDI_F, 3658 }, 3659 }, 3660 { 3661 .name = "AUX F", 3662 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS, 3663 .ops = &hsw_power_well_ops, 3664 .id = CNL_DISP_PW_DDI_F_AUX, 3665 { 3666 .hsw.regs = &hsw_power_well_regs, 3667 .hsw.idx = CNL_PW_CTL_IDX_AUX_F, 3668 }, 3669 }, 3670 }; 3671 3672 static const struct i915_power_well_ops icl_aux_power_well_ops = { 3673 .sync_hw = hsw_power_well_sync_hw, 3674 .enable = icl_aux_power_well_enable, 3675 .disable = icl_aux_power_well_disable, 3676 .is_enabled = hsw_power_well_enabled, 3677 }; 3678 3679 static const struct i915_power_well_regs icl_aux_power_well_regs = { 3680 .bios = ICL_PWR_WELL_CTL_AUX1, 3681 .driver = ICL_PWR_WELL_CTL_AUX2, 3682 .debug = ICL_PWR_WELL_CTL_AUX4, 3683 }; 3684 3685 static const struct i915_power_well_regs icl_ddi_power_well_regs = { 3686 .bios = ICL_PWR_WELL_CTL_DDI1, 3687 .driver = ICL_PWR_WELL_CTL_DDI2, 3688 .debug = ICL_PWR_WELL_CTL_DDI4, 3689 }; 3690 3691 static const struct i915_power_well_desc icl_power_wells[] = { 3692 { 3693 .name = "always-on", 3694 .always_on = true, 3695 .domains = POWER_DOMAIN_MASK, 3696 .ops = &i9xx_always_on_power_well_ops, 3697 .id = DISP_PW_ID_NONE, 3698 }, 3699 { 3700 .name = "power well 1", 3701 /* Handled by the DMC firmware */ 3702 .always_on = true, 3703 .domains = 0, 3704 .ops = &hsw_power_well_ops, 3705 .id = SKL_DISP_PW_1, 3706 { 3707 .hsw.regs = &hsw_power_well_regs, 3708 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3709 .hsw.has_fuses = true, 3710 }, 3711 }, 3712 { 3713 .name = "DC off", 3714 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, 3715 .ops = &gen9_dc_off_power_well_ops, 3716 .id = SKL_DISP_DC_OFF, 3717 }, 3718 { 3719 .name = "power well 2", 3720 .domains = ICL_PW_2_POWER_DOMAINS, 3721 .ops = &hsw_power_well_ops, 3722 .id = SKL_DISP_PW_2, 3723 { 3724 .hsw.regs = &hsw_power_well_regs, 3725 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3726 .hsw.has_fuses = true, 3727 }, 3728 }, 3729 { 3730 .name 
= "power well 3", 3731 .domains = ICL_PW_3_POWER_DOMAINS, 3732 .ops = &hsw_power_well_ops, 3733 .id = ICL_DISP_PW_3, 3734 { 3735 .hsw.regs = &hsw_power_well_regs, 3736 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3737 .hsw.irq_pipe_mask = BIT(PIPE_B), 3738 .hsw.has_vga = true, 3739 .hsw.has_fuses = true, 3740 }, 3741 }, 3742 { 3743 .name = "DDI A IO", 3744 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3745 .ops = &hsw_power_well_ops, 3746 .id = DISP_PW_ID_NONE, 3747 { 3748 .hsw.regs = &icl_ddi_power_well_regs, 3749 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3750 }, 3751 }, 3752 { 3753 .name = "DDI B IO", 3754 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3755 .ops = &hsw_power_well_ops, 3756 .id = DISP_PW_ID_NONE, 3757 { 3758 .hsw.regs = &icl_ddi_power_well_regs, 3759 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3760 }, 3761 }, 3762 { 3763 .name = "DDI C IO", 3764 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3765 .ops = &hsw_power_well_ops, 3766 .id = DISP_PW_ID_NONE, 3767 { 3768 .hsw.regs = &icl_ddi_power_well_regs, 3769 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3770 }, 3771 }, 3772 { 3773 .name = "DDI D IO", 3774 .domains = ICL_DDI_IO_D_POWER_DOMAINS, 3775 .ops = &hsw_power_well_ops, 3776 .id = DISP_PW_ID_NONE, 3777 { 3778 .hsw.regs = &icl_ddi_power_well_regs, 3779 .hsw.idx = ICL_PW_CTL_IDX_DDI_D, 3780 }, 3781 }, 3782 { 3783 .name = "DDI E IO", 3784 .domains = ICL_DDI_IO_E_POWER_DOMAINS, 3785 .ops = &hsw_power_well_ops, 3786 .id = DISP_PW_ID_NONE, 3787 { 3788 .hsw.regs = &icl_ddi_power_well_regs, 3789 .hsw.idx = ICL_PW_CTL_IDX_DDI_E, 3790 }, 3791 }, 3792 { 3793 .name = "DDI F IO", 3794 .domains = ICL_DDI_IO_F_POWER_DOMAINS, 3795 .ops = &hsw_power_well_ops, 3796 .id = DISP_PW_ID_NONE, 3797 { 3798 .hsw.regs = &icl_ddi_power_well_regs, 3799 .hsw.idx = ICL_PW_CTL_IDX_DDI_F, 3800 }, 3801 }, 3802 { 3803 .name = "AUX A", 3804 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 3805 .ops = &icl_aux_power_well_ops, 3806 .id = DISP_PW_ID_NONE, 3807 { 3808 .hsw.regs = &icl_aux_power_well_regs, 3809 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3810 }, 3811 }, 3812 { 3813 .name = "AUX B", 3814 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 3815 .ops = &icl_aux_power_well_ops, 3816 .id = DISP_PW_ID_NONE, 3817 { 3818 .hsw.regs = &icl_aux_power_well_regs, 3819 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3820 }, 3821 }, 3822 { 3823 .name = "AUX C TC1", 3824 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, 3825 .ops = &icl_aux_power_well_ops, 3826 .id = DISP_PW_ID_NONE, 3827 { 3828 .hsw.regs = &icl_aux_power_well_regs, 3829 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3830 .hsw.is_tc_tbt = false, 3831 }, 3832 }, 3833 { 3834 .name = "AUX D TC2", 3835 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, 3836 .ops = &icl_aux_power_well_ops, 3837 .id = DISP_PW_ID_NONE, 3838 { 3839 .hsw.regs = &icl_aux_power_well_regs, 3840 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 3841 .hsw.is_tc_tbt = false, 3842 }, 3843 }, 3844 { 3845 .name = "AUX E TC3", 3846 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, 3847 .ops = &icl_aux_power_well_ops, 3848 .id = DISP_PW_ID_NONE, 3849 { 3850 .hsw.regs = &icl_aux_power_well_regs, 3851 .hsw.idx = ICL_PW_CTL_IDX_AUX_E, 3852 .hsw.is_tc_tbt = false, 3853 }, 3854 }, 3855 { 3856 .name = "AUX F TC4", 3857 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, 3858 .ops = &icl_aux_power_well_ops, 3859 .id = DISP_PW_ID_NONE, 3860 { 3861 .hsw.regs = &icl_aux_power_well_regs, 3862 .hsw.idx = ICL_PW_CTL_IDX_AUX_F, 3863 .hsw.is_tc_tbt = false, 3864 }, 3865 }, 3866 { 3867 .name = "AUX C TBT1", 3868 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, 3869 .ops = &icl_aux_power_well_ops, 3870 .id = DISP_PW_ID_NONE, 3871 { 3872 .hsw.regs = &icl_aux_power_well_regs, 
3873 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, 3874 .hsw.is_tc_tbt = true, 3875 }, 3876 }, 3877 { 3878 .name = "AUX D TBT2", 3879 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, 3880 .ops = &icl_aux_power_well_ops, 3881 .id = DISP_PW_ID_NONE, 3882 { 3883 .hsw.regs = &icl_aux_power_well_regs, 3884 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, 3885 .hsw.is_tc_tbt = true, 3886 }, 3887 }, 3888 { 3889 .name = "AUX E TBT3", 3890 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, 3891 .ops = &icl_aux_power_well_ops, 3892 .id = DISP_PW_ID_NONE, 3893 { 3894 .hsw.regs = &icl_aux_power_well_regs, 3895 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, 3896 .hsw.is_tc_tbt = true, 3897 }, 3898 }, 3899 { 3900 .name = "AUX F TBT4", 3901 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, 3902 .ops = &icl_aux_power_well_ops, 3903 .id = DISP_PW_ID_NONE, 3904 { 3905 .hsw.regs = &icl_aux_power_well_regs, 3906 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, 3907 .hsw.is_tc_tbt = true, 3908 }, 3909 }, 3910 { 3911 .name = "power well 4", 3912 .domains = ICL_PW_4_POWER_DOMAINS, 3913 .ops = &hsw_power_well_ops, 3914 .id = DISP_PW_ID_NONE, 3915 { 3916 .hsw.regs = &hsw_power_well_regs, 3917 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 3918 .hsw.has_fuses = true, 3919 .hsw.irq_pipe_mask = BIT(PIPE_C), 3920 }, 3921 }, 3922 }; 3923 3924 static void 3925 tgl_tc_cold_request(struct drm_i915_private *i915, bool block) 3926 { 3927 u8 tries = 0; 3928 int ret; 3929 3930 while (1) { 3931 u32 low_val; 3932 u32 high_val = 0; 3933 3934 if (block) 3935 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; 3936 else 3937 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; 3938 3939 /* 3940 * Spec states that we should time out the request after 200us, 3941 * but the function below will time out after 500us 3942 */ 3943 ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, 3944 &high_val); 3945 if (ret == 0) { 3946 if (block && 3947 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) 3948 ret = -EIO; 3949 else 3950 break; 3951 } 3952 3953 if (++tries == 3) 3954 break; 3955 3956 msleep(1); 3957 } 3958 3959 if (ret) 3960 drm_err(&i915->drm, "TC cold %sblock failed\n", 3961 block ? "" : "un"); 3962 else 3963 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", 3964 block ?
"" : "un"); 3965 } 3966 3967 static void 3968 tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, 3969 struct i915_power_well *power_well) 3970 { 3971 tgl_tc_cold_request(i915, true); 3972 } 3973 3974 static void 3975 tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, 3976 struct i915_power_well *power_well) 3977 { 3978 tgl_tc_cold_request(i915, false); 3979 } 3980 3981 static void 3982 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, 3983 struct i915_power_well *power_well) 3984 { 3985 if (power_well->count > 0) 3986 tgl_tc_cold_off_power_well_enable(i915, power_well); 3987 else 3988 tgl_tc_cold_off_power_well_disable(i915, power_well); 3989 } 3990 3991 static bool 3992 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, 3993 struct i915_power_well *power_well) 3994 { 3995 /* 3996 * Not the correct implementation, but there is no way to just read it 3997 * back from PCODE, so return the use count to avoid state mismatch errors 3998 */ 3999 return power_well->count; 4000 } 4001 4002 static const struct i915_power_well_ops tgl_tc_cold_off_ops = { 4003 .sync_hw = tgl_tc_cold_off_power_well_sync_hw, 4004 .enable = tgl_tc_cold_off_power_well_enable, 4005 .disable = tgl_tc_cold_off_power_well_disable, 4006 .is_enabled = tgl_tc_cold_off_power_well_is_enabled, 4007 }; 4008 4009 static const struct i915_power_well_desc tgl_power_wells[] = { 4010 { 4011 .name = "always-on", 4012 .always_on = true, 4013 .domains = POWER_DOMAIN_MASK, 4014 .ops = &i9xx_always_on_power_well_ops, 4015 .id = DISP_PW_ID_NONE, 4016 }, 4017 { 4018 .name = "power well 1", 4019 /* Handled by the DMC firmware */ 4020 .always_on = true, 4021 .domains = 0, 4022 .ops = &hsw_power_well_ops, 4023 .id = SKL_DISP_PW_1, 4024 { 4025 .hsw.regs = &hsw_power_well_regs, 4026 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4027 .hsw.has_fuses = true, 4028 }, 4029 }, 4030 { 4031 .name = "DC off", 4032 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, 4033 .ops = &gen9_dc_off_power_well_ops, 4034 .id = SKL_DISP_DC_OFF, 4035 }, 4036 { 4037 .name = "power well 2", 4038 .domains = TGL_PW_2_POWER_DOMAINS, 4039 .ops = &hsw_power_well_ops, 4040 .id = SKL_DISP_PW_2, 4041 { 4042 .hsw.regs = &hsw_power_well_regs, 4043 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 4044 .hsw.has_fuses = true, 4045 }, 4046 }, 4047 { 4048 .name = "power well 3", 4049 .domains = TGL_PW_3_POWER_DOMAINS, 4050 .ops = &hsw_power_well_ops, 4051 .id = ICL_DISP_PW_3, 4052 { 4053 .hsw.regs = &hsw_power_well_regs, 4054 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4055 .hsw.irq_pipe_mask = BIT(PIPE_B), 4056 .hsw.has_vga = true, 4057 .hsw.has_fuses = true, 4058 }, 4059 }, 4060 { 4061 .name = "DDI A IO", 4062 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4063 .ops = &hsw_power_well_ops, 4064 .id = DISP_PW_ID_NONE, 4065 { 4066 .hsw.regs = &icl_ddi_power_well_regs, 4067 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4068 } 4069 }, 4070 { 4071 .name = "DDI B IO", 4072 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4073 .ops = &hsw_power_well_ops, 4074 .id = DISP_PW_ID_NONE, 4075 { 4076 .hsw.regs = &icl_ddi_power_well_regs, 4077 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4078 } 4079 }, 4080 { 4081 .name = "DDI C IO", 4082 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 4083 .ops = &hsw_power_well_ops, 4084 .id = DISP_PW_ID_NONE, 4085 { 4086 .hsw.regs = &icl_ddi_power_well_regs, 4087 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 4088 } 4089 }, 4090 { 4091 .name = "DDI D TC1 IO", 4092 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS, 4093 .ops = &hsw_power_well_ops, 4094 .id = DISP_PW_ID_NONE, 4095 { 4096 .hsw.regs = &icl_ddi_power_well_regs, 4097
.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4098 }, 4099 }, 4100 { 4101 .name = "DDI E TC2 IO", 4102 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS, 4103 .ops = &hsw_power_well_ops, 4104 .id = DISP_PW_ID_NONE, 4105 { 4106 .hsw.regs = &icl_ddi_power_well_regs, 4107 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4108 }, 4109 }, 4110 { 4111 .name = "DDI F TC3 IO", 4112 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS, 4113 .ops = &hsw_power_well_ops, 4114 .id = DISP_PW_ID_NONE, 4115 { 4116 .hsw.regs = &icl_ddi_power_well_regs, 4117 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 4118 }, 4119 }, 4120 { 4121 .name = "DDI G TC4 IO", 4122 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS, 4123 .ops = &hsw_power_well_ops, 4124 .id = DISP_PW_ID_NONE, 4125 { 4126 .hsw.regs = &icl_ddi_power_well_regs, 4127 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 4128 }, 4129 }, 4130 { 4131 .name = "DDI H TC5 IO", 4132 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS, 4133 .ops = &hsw_power_well_ops, 4134 .id = DISP_PW_ID_NONE, 4135 { 4136 .hsw.regs = &icl_ddi_power_well_regs, 4137 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, 4138 }, 4139 }, 4140 { 4141 .name = "DDI I TC6 IO", 4142 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS, 4143 .ops = &hsw_power_well_ops, 4144 .id = DISP_PW_ID_NONE, 4145 { 4146 .hsw.regs = &icl_ddi_power_well_regs, 4147 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, 4148 }, 4149 }, 4150 { 4151 .name = "TC cold off", 4152 .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, 4153 .ops = &tgl_tc_cold_off_ops, 4154 .id = TGL_DISP_PW_TC_COLD_OFF, 4155 }, 4156 { 4157 .name = "AUX A", 4158 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 4159 .ops = &icl_aux_power_well_ops, 4160 .id = DISP_PW_ID_NONE, 4161 { 4162 .hsw.regs = &icl_aux_power_well_regs, 4163 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4164 }, 4165 }, 4166 { 4167 .name = "AUX B", 4168 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 4169 .ops = &icl_aux_power_well_ops, 4170 .id = DISP_PW_ID_NONE, 4171 { 4172 .hsw.regs = &icl_aux_power_well_regs, 4173 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4174 }, 4175 }, 4176 { 4177 .name = "AUX C", 4178 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 4179 .ops = &icl_aux_power_well_ops, 4180 .id = DISP_PW_ID_NONE, 4181 { 4182 .hsw.regs = &icl_aux_power_well_regs, 4183 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 4184 }, 4185 }, 4186 { 4187 .name = "AUX D TC1", 4188 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, 4189 .ops = &icl_aux_power_well_ops, 4190 .id = DISP_PW_ID_NONE, 4191 { 4192 .hsw.regs = &icl_aux_power_well_regs, 4193 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4194 .hsw.is_tc_tbt = false, 4195 }, 4196 }, 4197 { 4198 .name = "AUX E TC2", 4199 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, 4200 .ops = &icl_aux_power_well_ops, 4201 .id = DISP_PW_ID_NONE, 4202 { 4203 .hsw.regs = &icl_aux_power_well_regs, 4204 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4205 .hsw.is_tc_tbt = false, 4206 }, 4207 }, 4208 { 4209 .name = "AUX F TC3", 4210 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS, 4211 .ops = &icl_aux_power_well_ops, 4212 .id = DISP_PW_ID_NONE, 4213 { 4214 .hsw.regs = &icl_aux_power_well_regs, 4215 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 4216 .hsw.is_tc_tbt = false, 4217 }, 4218 }, 4219 { 4220 .name = "AUX G TC4", 4221 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS, 4222 .ops = &icl_aux_power_well_ops, 4223 .id = DISP_PW_ID_NONE, 4224 { 4225 .hsw.regs = &icl_aux_power_well_regs, 4226 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 4227 .hsw.is_tc_tbt = false, 4228 }, 4229 }, 4230 { 4231 .name = "AUX H TC5", 4232 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS, 4233 .ops = &icl_aux_power_well_ops, 4234 .id = DISP_PW_ID_NONE, 4235 { 4236 .hsw.regs = &icl_aux_power_well_regs, 4237 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, 
4238 .hsw.is_tc_tbt = false, 4239 }, 4240 }, 4241 { 4242 .name = "AUX I TC6", 4243 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS, 4244 .ops = &icl_aux_power_well_ops, 4245 .id = DISP_PW_ID_NONE, 4246 { 4247 .hsw.regs = &icl_aux_power_well_regs, 4248 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, 4249 .hsw.is_tc_tbt = false, 4250 }, 4251 }, 4252 { 4253 .name = "AUX D TBT1", 4254 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS, 4255 .ops = &icl_aux_power_well_ops, 4256 .id = DISP_PW_ID_NONE, 4257 { 4258 .hsw.regs = &icl_aux_power_well_regs, 4259 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 4260 .hsw.is_tc_tbt = true, 4261 }, 4262 }, 4263 { 4264 .name = "AUX E TBT2", 4265 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS, 4266 .ops = &icl_aux_power_well_ops, 4267 .id = DISP_PW_ID_NONE, 4268 { 4269 .hsw.regs = &icl_aux_power_well_regs, 4270 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 4271 .hsw.is_tc_tbt = true, 4272 }, 4273 }, 4274 { 4275 .name = "AUX F TBT3", 4276 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS, 4277 .ops = &icl_aux_power_well_ops, 4278 .id = DISP_PW_ID_NONE, 4279 { 4280 .hsw.regs = &icl_aux_power_well_regs, 4281 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 4282 .hsw.is_tc_tbt = true, 4283 }, 4284 }, 4285 { 4286 .name = "AUX G TBT4", 4287 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS, 4288 .ops = &icl_aux_power_well_ops, 4289 .id = DISP_PW_ID_NONE, 4290 { 4291 .hsw.regs = &icl_aux_power_well_regs, 4292 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 4293 .hsw.is_tc_tbt = true, 4294 }, 4295 }, 4296 { 4297 .name = "AUX H TBT5", 4298 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS, 4299 .ops = &icl_aux_power_well_ops, 4300 .id = DISP_PW_ID_NONE, 4301 { 4302 .hsw.regs = &icl_aux_power_well_regs, 4303 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, 4304 .hsw.is_tc_tbt = true, 4305 }, 4306 }, 4307 { 4308 .name = "AUX I TBT6", 4309 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS, 4310 .ops = &icl_aux_power_well_ops, 4311 .id = DISP_PW_ID_NONE, 4312 { 4313 .hsw.regs = &icl_aux_power_well_regs, 4314 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, 4315 .hsw.is_tc_tbt = true, 4316 }, 4317 }, 4318 { 4319 .name = "power well 4", 4320 .domains = TGL_PW_4_POWER_DOMAINS, 4321 .ops = &hsw_power_well_ops, 4322 .id = DISP_PW_ID_NONE, 4323 { 4324 .hsw.regs = &hsw_power_well_regs, 4325 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4326 .hsw.has_fuses = true, 4327 .hsw.irq_pipe_mask = BIT(PIPE_C), 4328 } 4329 }, 4330 { 4331 .name = "power well 5", 4332 .domains = TGL_PW_5_POWER_DOMAINS, 4333 .ops = &hsw_power_well_ops, 4334 .id = DISP_PW_ID_NONE, 4335 { 4336 .hsw.regs = &hsw_power_well_regs, 4337 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4338 .hsw.has_fuses = true, 4339 .hsw.irq_pipe_mask = BIT(PIPE_D), 4340 }, 4341 }, 4342 }; 4343 4344 static const struct i915_power_well_desc rkl_power_wells[] = { 4345 { 4346 .name = "always-on", 4347 .always_on = true, 4348 .domains = POWER_DOMAIN_MASK, 4349 .ops = &i9xx_always_on_power_well_ops, 4350 .id = DISP_PW_ID_NONE, 4351 }, 4352 { 4353 .name = "power well 1", 4354 /* Handled by the DMC firmware */ 4355 .always_on = true, 4356 .domains = 0, 4357 .ops = &hsw_power_well_ops, 4358 .id = SKL_DISP_PW_1, 4359 { 4360 .hsw.regs = &hsw_power_well_regs, 4361 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4362 .hsw.has_fuses = true, 4363 }, 4364 }, 4365 { 4366 .name = "DC off", 4367 .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS, 4368 .ops = &gen9_dc_off_power_well_ops, 4369 .id = SKL_DISP_DC_OFF, 4370 }, 4371 { 4372 .name = "power well 3", 4373 .domains = RKL_PW_3_POWER_DOMAINS, 4374 .ops = &hsw_power_well_ops, 4375 .id = ICL_DISP_PW_3, 4376 { 4377 .hsw.regs = &hsw_power_well_regs, 4378 .hsw.idx = 
ICL_PW_CTL_IDX_PW_3, 4379 .hsw.irq_pipe_mask = BIT(PIPE_B), 4380 .hsw.has_vga = true, 4381 .hsw.has_fuses = true, 4382 }, 4383 }, 4384 { 4385 .name = "power well 4", 4386 .domains = RKL_PW_4_POWER_DOMAINS, 4387 .ops = &hsw_power_well_ops, 4388 .id = DISP_PW_ID_NONE, 4389 { 4390 .hsw.regs = &hsw_power_well_regs, 4391 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4392 .hsw.has_fuses = true, 4393 .hsw.irq_pipe_mask = BIT(PIPE_C), 4394 } 4395 }, 4396 { 4397 .name = "DDI A IO", 4398 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4399 .ops = &hsw_power_well_ops, 4400 .id = DISP_PW_ID_NONE, 4401 { 4402 .hsw.regs = &icl_ddi_power_well_regs, 4403 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4404 } 4405 }, 4406 { 4407 .name = "DDI B IO", 4408 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4409 .ops = &hsw_power_well_ops, 4410 .id = DISP_PW_ID_NONE, 4411 { 4412 .hsw.regs = &icl_ddi_power_well_regs, 4413 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4414 } 4415 }, 4416 { 4417 .name = "DDI D TC1 IO", 4418 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS, 4419 .ops = &hsw_power_well_ops, 4420 .id = DISP_PW_ID_NONE, 4421 { 4422 .hsw.regs = &icl_ddi_power_well_regs, 4423 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4424 }, 4425 }, 4426 { 4427 .name = "DDI E TC2 IO", 4428 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS, 4429 .ops = &hsw_power_well_ops, 4430 .id = DISP_PW_ID_NONE, 4431 { 4432 .hsw.regs = &icl_ddi_power_well_regs, 4433 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4434 }, 4435 }, 4436 { 4437 .name = "AUX A", 4438 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 4439 .ops = &icl_aux_power_well_ops, 4440 .id = DISP_PW_ID_NONE, 4441 { 4442 .hsw.regs = &icl_aux_power_well_regs, 4443 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4444 }, 4445 }, 4446 { 4447 .name = "AUX B", 4448 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 4449 .ops = &icl_aux_power_well_ops, 4450 .id = DISP_PW_ID_NONE, 4451 { 4452 .hsw.regs = &icl_aux_power_well_regs, 4453 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4454 }, 4455 }, 4456 { 4457 .name = "AUX D TC1", 4458 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, 4459 .ops = &icl_aux_power_well_ops, 4460 .id = DISP_PW_ID_NONE, 4461 { 4462 .hsw.regs = &icl_aux_power_well_regs, 4463 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4464 }, 4465 }, 4466 { 4467 .name = "AUX E TC2", 4468 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, 4469 .ops = &icl_aux_power_well_ops, 4470 .id = DISP_PW_ID_NONE, 4471 { 4472 .hsw.regs = &icl_aux_power_well_regs, 4473 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4474 }, 4475 }, 4476 }; 4477 4478 static int 4479 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 4480 int disable_power_well) 4481 { 4482 if (disable_power_well >= 0) 4483 return !!disable_power_well; 4484 4485 return 1; 4486 } 4487 4488 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, 4489 int enable_dc) 4490 { 4491 u32 mask; 4492 int requested_dc; 4493 int max_dc; 4494 4495 if (IS_DG1(dev_priv)) 4496 max_dc = 3; 4497 else if (INTEL_GEN(dev_priv) >= 12) 4498 max_dc = 4; 4499 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEN9_BC(dev_priv)) 4500 max_dc = 2; 4501 else if (IS_GEN9_LP(dev_priv)) 4502 max_dc = 1; 4503 else 4504 max_dc = 0; 4505 4506 /* 4507 * DC9 has a separate HW flow from the rest of the DC states, 4508 * not depending on the DMC firmware. It's needed by system 4509 * suspend/resume, so allow it unconditionally. 4510 */ 4511 mask = IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 11 ? 
4512 DC_STATE_EN_DC9 : 0; 4513 4514 if (!dev_priv->params.disable_power_well) 4515 max_dc = 0; 4516 4517 if (enable_dc >= 0 && enable_dc <= max_dc) { 4518 requested_dc = enable_dc; 4519 } else if (enable_dc == -1) { 4520 requested_dc = max_dc; 4521 } else if (enable_dc > max_dc && enable_dc <= 4) { 4522 drm_dbg_kms(&dev_priv->drm, 4523 "Adjusting requested max DC state (%d->%d)\n", 4524 enable_dc, max_dc); 4525 requested_dc = max_dc; 4526 } else { 4527 drm_err(&dev_priv->drm, 4528 "Unexpected value for enable_dc (%d)\n", enable_dc); 4529 requested_dc = max_dc; 4530 } 4531 4532 switch (requested_dc) { 4533 case 4: 4534 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; 4535 break; 4536 case 3: 4537 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; 4538 break; 4539 case 2: 4540 mask |= DC_STATE_EN_UPTO_DC6; 4541 break; 4542 case 1: 4543 mask |= DC_STATE_EN_UPTO_DC5; 4544 break; 4545 } 4546 4547 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask); 4548 4549 return mask; 4550 } 4551 4552 static int 4553 __set_power_wells(struct i915_power_domains *power_domains, 4554 const struct i915_power_well_desc *power_well_descs, 4555 int power_well_descs_sz, u64 skip_mask) 4556 { 4557 struct drm_i915_private *i915 = container_of(power_domains, 4558 struct drm_i915_private, 4559 power_domains); 4560 u64 power_well_ids = 0; 4561 int power_well_count = 0; 4562 int i, plt_idx = 0; 4563 4564 for (i = 0; i < power_well_descs_sz; i++) 4565 if (!(BIT_ULL(power_well_descs[i].id) & skip_mask)) 4566 power_well_count++; 4567 4568 power_domains->power_well_count = power_well_count; 4569 power_domains->power_wells = 4570 kcalloc(power_well_count, 4571 sizeof(*power_domains->power_wells), 4572 GFP_KERNEL); 4573 if (!power_domains->power_wells) 4574 return -ENOMEM; 4575 4576 for (i = 0; i < power_well_descs_sz; i++) { 4577 enum i915_power_well_id id = power_well_descs[i].id; 4578 4579 if (BIT_ULL(id) & skip_mask) 4580 continue; 4581 4582 power_domains->power_wells[plt_idx++].desc = 4583 &power_well_descs[i]; 4584 4585 if (id == DISP_PW_ID_NONE) 4586 continue; 4587 4588 drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8); 4589 drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id)); 4590 power_well_ids |= BIT_ULL(id); 4591 } 4592 4593 return 0; 4594 } 4595 4596 #define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \ 4597 __set_power_wells(power_domains, __power_well_descs, \ 4598 ARRAY_SIZE(__power_well_descs), skip_mask) 4599 4600 #define set_power_wells(power_domains, __power_well_descs) \ 4601 set_power_wells_mask(power_domains, __power_well_descs, 0) 4602 4603 /** 4604 * intel_power_domains_init - initializes the power domain structures 4605 * @dev_priv: i915 device instance 4606 * 4607 * Initializes the power domain structures for @dev_priv depending upon the 4608 * supported platform. 
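 * Returns: 0 on success, or -ENOMEM if allocating the power well array fails.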
4609 */ 4610 int intel_power_domains_init(struct drm_i915_private *dev_priv) 4611 { 4612 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4613 int err; 4614 4615 dev_priv->params.disable_power_well = 4616 sanitize_disable_power_well_option(dev_priv, 4617 dev_priv->params.disable_power_well); 4618 dev_priv->csr.allowed_dc_mask = 4619 get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); 4620 4621 dev_priv->csr.target_dc_state = 4622 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 4623 4624 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); 4625 4626 mutex_init(&power_domains->lock); 4627 4628 INIT_DELAYED_WORK(&power_domains->async_put_work, 4629 intel_display_power_put_async_work); 4630 4631 /* 4632 * The enabling order will be from lower to higher indexed wells, 4633 * the disabling order is reversed. 4634 */ 4635 if (IS_DG1(dev_priv)) { 4636 err = set_power_wells_mask(power_domains, tgl_power_wells, 4637 BIT_ULL(TGL_DISP_PW_TC_COLD_OFF)); 4638 } else if (IS_ROCKETLAKE(dev_priv)) { 4639 err = set_power_wells(power_domains, rkl_power_wells); 4640 } else if (IS_GEN(dev_priv, 12)) { 4641 err = set_power_wells(power_domains, tgl_power_wells); 4642 } else if (IS_GEN(dev_priv, 11)) { 4643 err = set_power_wells(power_domains, icl_power_wells); 4644 } else if (IS_CNL_WITH_PORT_F(dev_priv)) { 4645 err = set_power_wells(power_domains, cnl_power_wells); 4646 } else if (IS_CANNONLAKE(dev_priv)) { 4647 err = set_power_wells_mask(power_domains, cnl_power_wells, 4648 BIT_ULL(CNL_DISP_PW_DDI_F_IO) | 4649 BIT_ULL(CNL_DISP_PW_DDI_F_AUX)); 4650 } else if (IS_GEMINILAKE(dev_priv)) { 4651 err = set_power_wells(power_domains, glk_power_wells); 4652 } else if (IS_BROXTON(dev_priv)) { 4653 err = set_power_wells(power_domains, bxt_power_wells); 4654 } else if (IS_GEN9_BC(dev_priv)) { 4655 err = set_power_wells(power_domains, skl_power_wells); 4656 } else if (IS_CHERRYVIEW(dev_priv)) { 4657 err = set_power_wells(power_domains, chv_power_wells); 4658 } else if (IS_BROADWELL(dev_priv)) { 4659 err = set_power_wells(power_domains, bdw_power_wells); 4660 } else if (IS_HASWELL(dev_priv)) { 4661 err = set_power_wells(power_domains, hsw_power_wells); 4662 } else if (IS_VALLEYVIEW(dev_priv)) { 4663 err = set_power_wells(power_domains, vlv_power_wells); 4664 } else if (IS_I830(dev_priv)) { 4665 err = set_power_wells(power_domains, i830_power_wells); 4666 } else { 4667 err = set_power_wells(power_domains, i9xx_always_on_power_well); 4668 } 4669 4670 return err; 4671 } 4672 4673 /** 4674 * intel_power_domains_cleanup - clean up power domains resources 4675 * @dev_priv: i915 device instance 4676 * 4677 * Release any resources acquired by intel_power_domains_init() 4678 */ 4679 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) 4680 { 4681 kfree(dev_priv->power_domains.power_wells); 4682 } 4683 4684 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) 4685 { 4686 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4687 struct i915_power_well *power_well; 4688 4689 mutex_lock(&power_domains->lock); 4690 for_each_power_well(dev_priv, power_well) { 4691 power_well->desc->ops->sync_hw(dev_priv, power_well); 4692 power_well->hw_enabled = 4693 power_well->desc->ops->is_enabled(dev_priv, power_well); 4694 } 4695 mutex_unlock(&power_domains->lock); 4696 } 4697 4698 static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv, 4699 enum dbuf_slice slice, bool enable) 4700 { 4701 i915_reg_t reg = DBUF_CTL_S(slice); 4702 bool state; 4703 u32 val; 4704 4705 
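/* Mirror the request into DBUF_CTL, give the slice ~10us to settle, then verify that the power state bit acknowledges the request. */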
val = intel_de_read(dev_priv, reg); 4706 if (enable) 4707 val |= DBUF_POWER_REQUEST; 4708 else 4709 val &= ~DBUF_POWER_REQUEST; 4710 intel_de_write(dev_priv, reg, val); 4711 intel_de_posting_read(dev_priv, reg); 4712 udelay(10); 4713 4714 state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE; 4715 drm_WARN(&dev_priv->drm, enable != state, 4716 "DBuf slice %d power %s timeout!\n", 4717 slice, enable ? "enable" : "disable"); 4718 } 4719 4720 void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, 4721 u8 req_slices) 4722 { 4723 int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices; 4724 struct i915_power_domains *power_domains = &dev_priv->power_domains; 4725 enum dbuf_slice slice; 4726 4727 drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1), 4728 "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n", 4729 req_slices, num_slices); 4730 4731 drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n", 4732 req_slices); 4733 4734 /* 4735 * This might run in parallel to gen9_dc_off_power_well_enable 4736 * being called from intel_dp_detect, for instance, 4737 * which can trigger an assertion due to a race condition: 4738 * gen9_assert_dbuf_enabled might preempt this after the registers 4739 * were already updated, but before dev_priv was. 4740 */ 4741 mutex_lock(&power_domains->lock); 4742 4743 for (slice = DBUF_S1; slice < num_slices; slice++) 4744 gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice)); 4745 4746 dev_priv->dbuf.enabled_slices = req_slices; 4747 4748 mutex_unlock(&power_domains->lock); 4749 } 4750 4751 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) 4752 { 4753 dev_priv->dbuf.enabled_slices = 4754 intel_enabled_dbuf_slices_mask(dev_priv); 4755 4756 /* 4757 * Just power up at least 1 slice, we will 4758 * figure out later which slices we have and what we need. 4759 */ 4760 gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | 4761 dev_priv->dbuf.enabled_slices); 4762 } 4763 4764 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) 4765 { 4766 gen9_dbuf_slices_update(dev_priv, 0); 4767 } 4768 4769 static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) 4770 { 4771 const int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices; 4772 enum dbuf_slice slice; 4773 4774 for (slice = DBUF_S1; slice < (DBUF_S1 + num_slices); slice++) 4775 intel_de_rmw(dev_priv, DBUF_CTL_S(slice), 4776 DBUF_TRACKER_STATE_SERVICE_MASK, 4777 DBUF_TRACKER_STATE_SERVICE(8)); 4778 } 4779 4780 static void icl_mbus_init(struct drm_i915_private *dev_priv) 4781 { 4782 unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask; 4783 u32 mask, val, i; 4784 4785 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | 4786 MBUS_ABOX_BT_CREDIT_POOL2_MASK | 4787 MBUS_ABOX_B_CREDIT_MASK | 4788 MBUS_ABOX_BW_CREDIT_MASK; 4789 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 4790 MBUS_ABOX_BT_CREDIT_POOL2(16) | 4791 MBUS_ABOX_B_CREDIT(1) | 4792 MBUS_ABOX_BW_CREDIT(1); 4793 4794 /* 4795 * gen12 platforms that use abox1 and abox2 for pixel data reads still 4796 * expect us to program the abox_ctl0 register as well, even though 4797 * we don't have to program other instance-0 registers like BW_BUDDY.
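 * The credit values above are applied to every MBUS_ABOX_CTL instance whose bit is set in abox_regs.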
4798 */ 4799 if (IS_GEN(dev_priv, 12)) 4800 abox_regs |= BIT(0); 4801 4802 for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) 4803 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val); 4804 } 4805 4806 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 4807 { 4808 u32 val = intel_de_read(dev_priv, LCPLL_CTL); 4809 4810 /* 4811 * The LCPLL register should be turned on by the BIOS. For now 4812 * let's just check its state and print errors in case 4813 * something is wrong. Don't even try to turn it on. 4814 */ 4815 4816 if (val & LCPLL_CD_SOURCE_FCLK) 4817 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); 4818 4819 if (val & LCPLL_PLL_DISABLE) 4820 drm_err(&dev_priv->drm, "LCPLL is disabled\n"); 4821 4822 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 4823 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); 4824 } 4825 4826 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 4827 { 4828 struct drm_device *dev = &dev_priv->drm; 4829 struct intel_crtc *crtc; 4830 4831 for_each_intel_crtc(dev, crtc) 4832 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 4833 pipe_name(crtc->pipe)); 4834 4835 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), 4836 "Display power well on\n"); 4837 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, 4838 "SPLL enabled\n"); 4839 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 4840 "WRPLL1 enabled\n"); 4841 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 4842 "WRPLL2 enabled\n"); 4843 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, 4844 "Panel power on\n"); 4845 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 4846 "CPU PWM1 enabled\n"); 4847 if (IS_HASWELL(dev_priv)) 4848 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 4849 "CPU PWM2 enabled\n"); 4850 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 4851 "PCH PWM1 enabled\n"); 4852 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 4853 "Utility pin enabled\n"); 4854 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, 4855 "PCH GTC enabled\n"); 4856 4857 /* 4858 * In theory we can still leave IRQs enabled, as long as only the HPD 4859 * interrupts remain enabled. We used to check for that, but since it's 4860 * gen-specific and since we only disable LCPLL after we fully disable 4861 * the interrupts, the check below should be enough. 4862 */ 4863 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 4864 } 4865 4866 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 4867 { 4868 if (IS_HASWELL(dev_priv)) 4869 return intel_de_read(dev_priv, D_COMP_HSW); 4870 else 4871 return intel_de_read(dev_priv, D_COMP_BDW); 4872 } 4873 4874 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 4875 { 4876 if (IS_HASWELL(dev_priv)) { 4877 if (sandybridge_pcode_write(dev_priv, 4878 GEN6_PCODE_WRITE_D_COMP, val)) 4879 drm_dbg_kms(&dev_priv->drm, 4880 "Failed to write to D_COMP\n"); 4881 } else { 4882 intel_de_write(dev_priv, D_COMP_BDW, val); 4883 intel_de_posting_read(dev_priv, D_COMP_BDW); 4884 } 4885 } 4886 4887 /* 4888 * This function implements pieces of two sequences from BSpec: 4889 * - Sequence for display software to disable LCPLL 4890 * - Sequence for display software to allow package C8+ 4891 * The steps implemented here are just the steps that actually touch the LCPLL 4892 * register. 
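In order: optionally switch the CDCLK source to FCLK, disable the PLL and wait for the lock to clear, disable D_COMP, and, if requested, set the power-down-allow bit.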
Callers should take care of disabling all the display engine 4893 * functions, doing the mode unset, fixing interrupts, etc. 4894 */ 4895 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 4896 bool switch_to_fclk, bool allow_power_down) 4897 { 4898 u32 val; 4899 4900 assert_can_disable_lcpll(dev_priv); 4901 4902 val = intel_de_read(dev_priv, LCPLL_CTL); 4903 4904 if (switch_to_fclk) { 4905 val |= LCPLL_CD_SOURCE_FCLK; 4906 intel_de_write(dev_priv, LCPLL_CTL, val); 4907 4908 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & 4909 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 4910 drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); 4911 4912 val = intel_de_read(dev_priv, LCPLL_CTL); 4913 } 4914 4915 val |= LCPLL_PLL_DISABLE; 4916 intel_de_write(dev_priv, LCPLL_CTL, val); 4917 intel_de_posting_read(dev_priv, LCPLL_CTL); 4918 4919 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 4920 drm_err(&dev_priv->drm, "LCPLL still locked\n"); 4921 4922 val = hsw_read_dcomp(dev_priv); 4923 val |= D_COMP_COMP_DISABLE; 4924 hsw_write_dcomp(dev_priv, val); 4925 ndelay(100); 4926 4927 if (wait_for((hsw_read_dcomp(dev_priv) & 4928 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 4929 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); 4930 4931 if (allow_power_down) { 4932 val = intel_de_read(dev_priv, LCPLL_CTL); 4933 val |= LCPLL_POWER_DOWN_ALLOW; 4934 intel_de_write(dev_priv, LCPLL_CTL, val); 4935 intel_de_posting_read(dev_priv, LCPLL_CTL); 4936 } 4937 } 4938 4939 /* 4940 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 4941 * source. 4942 */ 4943 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 4944 { 4945 u32 val; 4946 4947 val = intel_de_read(dev_priv, LCPLL_CTL); 4948 4949 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 4950 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 4951 return; 4952 4953 /* 4954 * Make sure we're not in PC8 state before disabling PC8, otherwise 4955 * we'll hang the machine. Taking a forcewake reference prevents the PC8 state. 4956 */ 4957 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 4958 4959 if (val & LCPLL_POWER_DOWN_ALLOW) { 4960 val &= ~LCPLL_POWER_DOWN_ALLOW; 4961 intel_de_write(dev_priv, LCPLL_CTL, val); 4962 intel_de_posting_read(dev_priv, LCPLL_CTL); 4963 } 4964 4965 val = hsw_read_dcomp(dev_priv); 4966 val |= D_COMP_COMP_FORCE; 4967 val &= ~D_COMP_COMP_DISABLE; 4968 hsw_write_dcomp(dev_priv, val); 4969 4970 val = intel_de_read(dev_priv, LCPLL_CTL); 4971 val &= ~LCPLL_PLL_DISABLE; 4972 intel_de_write(dev_priv, LCPLL_CTL, val); 4973 4974 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 4975 drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); 4976 4977 if (val & LCPLL_CD_SOURCE_FCLK) { 4978 val = intel_de_read(dev_priv, LCPLL_CTL); 4979 val &= ~LCPLL_CD_SOURCE_FCLK; 4980 intel_de_write(dev_priv, LCPLL_CTL, val); 4981 4982 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & 4983 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 4984 drm_err(&dev_priv->drm, 4985 "Switching back to LCPLL failed\n"); 4986 } 4987 4988 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 4989 4990 intel_update_cdclk(dev_priv); 4991 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); 4992 } 4993 4994 /* 4995 * Package states C8 and deeper are really deep PC states that can only be 4996 * reached when all the devices on the system allow it, so even if the graphics 4997 * device allows PC8+, it doesn't mean the system will actually get to these 4998 * states.
Our driver only allows PC8+ when going into runtime PM. 4999 * 5000 * The requirements for PC8+ are that all the outputs are disabled, the power 5001 * well is disabled and most interrupts are disabled, and these are also 5002 * requirements for runtime PM. When these conditions are met, we manually do 5003 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 5004 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard 5005 * hang the machine. 5006 * 5007 * When we really reach PC8 or deeper states (not just when we allow it) we lose 5008 * the state of some registers, so when we come back from PC8+ we need to 5009 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 5010 * need to take care of the registers kept by RC6. Notice that this happens even 5011 * if we don't put the device in PCI D3 state (which is what currently happens 5012 * because of the runtime PM support). 5013 * 5014 * For more, read "Display Sequences for Package C8" in the hardware 5015 * documentation. 5016 */ 5017 static void hsw_enable_pc8(struct drm_i915_private *dev_priv) 5018 { 5019 u32 val; 5020 5021 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n"); 5022 5023 if (HAS_PCH_LPT_LP(dev_priv)) { 5024 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 5025 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 5026 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 5027 } 5028 5029 lpt_disable_clkout_dp(dev_priv); 5030 hsw_disable_lcpll(dev_priv, true, true); 5031 } 5032 5033 static void hsw_disable_pc8(struct drm_i915_private *dev_priv) 5034 { 5035 u32 val; 5036 5037 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); 5038 5039 hsw_restore_lcpll(dev_priv); 5040 intel_init_pch_refclk(dev_priv); 5041 5042 if (HAS_PCH_LPT_LP(dev_priv)) { 5043 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 5044 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 5045 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 5046 } 5047 } 5048 5049 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, 5050 bool enable) 5051 { 5052 i915_reg_t reg; 5053 u32 reset_bits, val; 5054 5055 if (IS_IVYBRIDGE(dev_priv)) { 5056 reg = GEN7_MSG_CTL; 5057 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; 5058 } else { 5059 reg = HSW_NDE_RSTWRN_OPT; 5060 reset_bits = RESET_PCH_HANDSHAKE_ENABLE; 5061 } 5062 5063 val = intel_de_read(dev_priv, reg); 5064 5065 if (enable) 5066 val |= reset_bits; 5067 else 5068 val &= ~reset_bits; 5069 5070 intel_de_write(dev_priv, reg, val); 5071 } 5072 5073 static void skl_display_core_init(struct drm_i915_private *dev_priv, 5074 bool resume) 5075 { 5076 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5077 struct i915_power_well *well; 5078 5079 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5080 5081 /* enable PCH reset handshake */ 5082 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 5083 5084 /* enable PG1 and Misc I/O */ 5085 mutex_lock(&power_domains->lock); 5086 5087 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5088 intel_power_well_enable(dev_priv, well); 5089 5090 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); 5091 intel_power_well_enable(dev_priv, well); 5092 5093 mutex_unlock(&power_domains->lock); 5094 5095 intel_cdclk_init_hw(dev_priv); 5096 5097 gen9_dbuf_enable(dev_priv); 5098 5099 if (resume && dev_priv->csr.dmc_payload) 5100 intel_csr_load_program(dev_priv); 5101 } 5102 5103 static void skl_display_core_uninit(struct drm_i915_private *dev_priv) 5104 { 5105 struct
i915_power_domains *power_domains = &dev_priv->power_domains; 5106 struct i915_power_well *well; 5107 5108 gen9_disable_dc_states(dev_priv); 5109 5110 gen9_dbuf_disable(dev_priv); 5111 5112 intel_cdclk_uninit_hw(dev_priv); 5113 5114 /* The spec doesn't call for removing the reset handshake flag */ 5115 /* disable PG1 and Misc I/O */ 5116 5117 mutex_lock(&power_domains->lock); 5118 5119 /* 5120 * BSpec says to keep the MISC IO power well enabled here, only 5121 * remove our request for power well 1. 5122 * Note that even though the driver's request is removed, power well 1 5123 * may stay enabled after this due to DMC's own request on it. 5124 */ 5125 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5126 intel_power_well_disable(dev_priv, well); 5127 5128 mutex_unlock(&power_domains->lock); 5129 5130 usleep_range(10, 30); /* 10 us delay per Bspec */ 5131 } 5132 5133 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume) 5134 { 5135 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5136 struct i915_power_well *well; 5137 5138 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5139 5140 /* 5141 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 5142 * or else the reset will hang because there is no PCH to respond. 5143 * Move the handshake programming to the initialization sequence; 5144 * previously it was left up to the BIOS. 5145 */ 5146 intel_pch_reset_handshake(dev_priv, false); 5147 5148 /* Enable PG1 */ 5149 mutex_lock(&power_domains->lock); 5150 5151 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5152 intel_power_well_enable(dev_priv, well); 5153 5154 mutex_unlock(&power_domains->lock); 5155 5156 intel_cdclk_init_hw(dev_priv); 5157 5158 gen9_dbuf_enable(dev_priv); 5159 5160 if (resume && dev_priv->csr.dmc_payload) 5161 intel_csr_load_program(dev_priv); 5162 } 5163 5164 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) 5165 { 5166 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5167 struct i915_power_well *well; 5168 5169 gen9_disable_dc_states(dev_priv); 5170 5171 gen9_dbuf_disable(dev_priv); 5172 5173 intel_cdclk_uninit_hw(dev_priv); 5174 5175 /* The spec doesn't call for removing the reset handshake flag */ 5176 5177 /* 5178 * Disable PW1 (PG1). 5179 * Note that even though the driver's request is removed, power well 1 5180 * may stay enabled after this due to DMC's own request on it. 5181 */ 5182 mutex_lock(&power_domains->lock); 5183 5184 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5185 intel_power_well_disable(dev_priv, well); 5186 5187 mutex_unlock(&power_domains->lock); 5188 5189 usleep_range(10, 30); /* 10 us delay per Bspec */ 5190 } 5191 5192 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) 5193 { 5194 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5195 struct i915_power_well *well; 5196 5197 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5198 5199 /* 1. Enable PCH Reset Handshake */ 5200 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 5201 5202 /* 2-3. */ 5203 intel_combo_phy_init(dev_priv); 5204 5205 /* 5206 * 4. Enable Power Well 1 (PG1). 5207 * The AUX IO power wells will be enabled on demand. 5208 */ 5209 mutex_lock(&power_domains->lock); 5210 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5211 intel_power_well_enable(dev_priv, well); 5212 mutex_unlock(&power_domains->lock); 5213 5214 /* 5. Enable CD clock */ 5215 intel_cdclk_init_hw(dev_priv); 5216 5217 /* 6.
Enable DBUF */ 5218 gen9_dbuf_enable(dev_priv); 5219 5220 if (resume && dev_priv->csr.dmc_payload) 5221 intel_csr_load_program(dev_priv); 5222 } 5223 5224 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) 5225 { 5226 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5227 struct i915_power_well *well; 5228 5229 gen9_disable_dc_states(dev_priv); 5230 5231 /* 1. Disable all display engine functions -> already done */ 5232 5233 /* 2. Disable DBUF */ 5234 gen9_dbuf_disable(dev_priv); 5235 5236 /* 3. Disable CD clock */ 5237 intel_cdclk_uninit_hw(dev_priv); 5238 5239 /* 5240 * 4. Disable Power Well 1 (PG1). 5241 * The AUX IO power wells are toggled on demand, so they are already 5242 * disabled at this point. 5243 */ 5244 mutex_lock(&power_domains->lock); 5245 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5246 intel_power_well_disable(dev_priv, well); 5247 mutex_unlock(&power_domains->lock); 5248 5249 usleep_range(10, 30); /* 10 us delay per Bspec */ 5250 5251 /* 5. */ 5252 intel_combo_phy_uninit(dev_priv); 5253 } 5254 5255 struct buddy_page_mask { 5256 u32 page_mask; 5257 u8 type; 5258 u8 num_channels; 5259 }; 5260 5261 static const struct buddy_page_mask tgl_buddy_page_masks[] = { 5262 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF }, 5263 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C }, 5264 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F }, 5265 { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 }, 5266 {} 5267 }; 5268 5269 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = { 5270 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 }, 5271 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 }, 5272 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 }, 5273 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 }, 5274 {} 5275 }; 5276 5277 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) 5278 { 5279 enum intel_dram_type type = dev_priv->dram_info.type; 5280 u8 num_channels = dev_priv->dram_info.num_channels; 5281 const struct buddy_page_mask *table; 5282 unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask; 5283 int config, i; 5284 5285 if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) || 5286 IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0)) 5287 /* Wa_1409767108:tgl,dg1 */ 5288 table = wa_1409767108_buddy_page_masks; 5289 else 5290 table = tgl_buddy_page_masks; 5291 5292 for (config = 0; table[config].page_mask != 0; config++) 5293 if (table[config].num_channels == num_channels && 5294 table[config].type == type) 5295 break; 5296 5297 if (table[config].page_mask == 0) { 5298 drm_dbg(&dev_priv->drm, 5299 "Unknown memory configuration; disabling address buddy logic.\n"); 5300 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) 5301 intel_de_write(dev_priv, BW_BUDDY_CTL(i), 5302 BW_BUDDY_DISABLE); 5303 } else { 5304 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) { 5305 intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i), 5306 table[config].page_mask); 5307 5308 /* Wa_22010178259:tgl,rkl */ 5309 intel_de_rmw(dev_priv, BW_BUDDY_CTL(i), 5310 BW_BUDDY_TLB_REQ_TIMER_MASK, 5311 BW_BUDDY_TLB_REQ_TIMER(0x8)); 5312 } 5313 } 5314 } 5315 5316 static void icl_display_core_init(struct drm_i915_private *dev_priv, 5317 bool resume) 5318 { 5319 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5320 struct i915_power_well *well; 5321 u32 val; 5322 5323 gen9_set_dc_state(dev_priv,
DC_STATE_DISABLE); 5324 5325 /* Wa_14011294188:ehl,jsl,tgl,rkl */ 5326 if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP && 5327 INTEL_PCH_TYPE(dev_priv) < PCH_DG1) 5328 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0, 5329 PCH_DPMGUNIT_CLOCK_GATE_DISABLE); 5330 5331 /* 1. Enable PCH reset handshake. */ 5332 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 5333 5334 /* 2. Initialize all combo phys */ 5335 intel_combo_phy_init(dev_priv); 5336 5337 /* 5338 * 3. Enable Power Well 1 (PG1). 5339 * The AUX IO power wells will be enabled on demand. 5340 */ 5341 mutex_lock(&power_domains->lock); 5342 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5343 intel_power_well_enable(dev_priv, well); 5344 mutex_unlock(&power_domains->lock); 5345 5346 /* 4. Enable CDCLK. */ 5347 intel_cdclk_init_hw(dev_priv); 5348 5349 if (INTEL_GEN(dev_priv) >= 12) 5350 gen12_dbuf_slices_config(dev_priv); 5351 5352 /* 5. Enable DBUF. */ 5353 gen9_dbuf_enable(dev_priv); 5354 5355 /* 6. Setup MBUS. */ 5356 icl_mbus_init(dev_priv); 5357 5358 /* 7. Program arbiter BW_BUDDY registers */ 5359 if (INTEL_GEN(dev_priv) >= 12) 5360 tgl_bw_buddy_init(dev_priv); 5361 5362 if (resume && dev_priv->csr.dmc_payload) 5363 intel_csr_load_program(dev_priv); 5364 5365 /* Wa_14011508470 */ 5366 if (IS_GEN(dev_priv, 12)) { 5367 val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | 5368 DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR; 5369 intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val); 5370 } 5371 } 5372 5373 static void icl_display_core_uninit(struct drm_i915_private *dev_priv) 5374 { 5375 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5376 struct i915_power_well *well; 5377 5378 gen9_disable_dc_states(dev_priv); 5379 5380 /* 1. Disable all display engine functions -> already done */ 5381 5382 /* 2. Disable DBUF */ 5383 gen9_dbuf_disable(dev_priv); 5384 5385 /* 3. Disable CD clock */ 5386 intel_cdclk_uninit_hw(dev_priv); 5387 5388 /* 5389 * 4. Disable Power Well 1 (PG1). 5390 * The AUX IO power wells are toggled on demand, so they are already 5391 * disabled at this point. 5392 */ 5393 mutex_lock(&power_domains->lock); 5394 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5395 intel_power_well_disable(dev_priv, well); 5396 mutex_unlock(&power_domains->lock); 5397 5398 /* 5. */ 5399 intel_combo_phy_uninit(dev_priv); 5400 } 5401 5402 static void chv_phy_control_init(struct drm_i915_private *dev_priv) 5403 { 5404 struct i915_power_well *cmn_bc = 5405 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 5406 struct i915_power_well *cmn_d = 5407 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 5408 5409 /* 5410 * DISPLAY_PHY_CONTROL can get corrupted if read. As a 5411 * workaround never ever read DISPLAY_PHY_CONTROL, and 5412 * instead maintain a shadow copy ourselves. Use the actual 5413 * power well state and lane status to reconstruct the 5414 * expected initial value. 5415 */ 5416 dev_priv->chv_phy_control = 5417 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | 5418 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | 5419 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | 5420 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | 5421 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); 5422 5423 /* 5424 * If all lanes are disabled we leave the override disabled 5425 * with all power down bits cleared to match the state we 5426 * would use after disabling the port. Otherwise enable the 5427 * override and set the lane powerdown bits according to the 5428 * current lane status.
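For example, a port-ready field reading 0xf (all lanes down) leaves the override disabled with the mask cleared, while any other value enables the override and copies the ready bits into PHY_CH_POWER_DOWN_OVRD for that channel.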
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might already be active, skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}
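/*
 * Illustrative sketch (not driver code) of the Punit sideband access
 * pattern used by vlv_punit_is_power_gated() below: every read is
 * bracketed by vlv_punit_get()/vlv_punit_put() so the sideband bus is
 * acquired and released around the transaction:
 *
 *	vlv_punit_get(dev_priv);
 *	val = vlv_punit_read(dev_priv, PUNIT_REG_VEDSSPM0);
 *	vlv_punit_put(dev_priv);
 *
 * Any Punit register offset can be substituted; PUNIT_REG_VEDSSPM0 is
 * simply the one the VED assert below happens to check.
 */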
static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: true if called from a resume code path
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells belonging to
 * other domains are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	if (INTEL_GEN(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_CANNONLAKE(i915)) {
		cnl_display_core_init(i915, resume);
	} else if (IS_GEN9_BC(i915)) {
		skl_display_core_init(i915, resume);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/* Disable power well toggling if the user asked for it. */
	if (!i915->params.disable_power_well)
		intel_display_power_get(i915, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}
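/*
 * Illustrative sketch (not driver code) of the pairing documented in the
 * kernel-doc comments in this file: during driver load the expected
 * ordering is roughly
 *
 *	intel_power_domains_init_hw(i915, false);
 *	... display HW state readout ...
 *	intel_power_domains_enable(i915);
 *
 * and during driver removal the mirror image
 *
 *	intel_power_domains_disable(i915);
 *	...
 *	intel_power_domains_driver_remove(i915);
 *
 * per the "must be paired with" requirements stated above and below.
 */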
/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915->params.disable_power_well)
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the ondemand enabling/disabling of the display power wells. Note that
 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
 * only at specific points of the display modeset sequence, thus they are not
 * affected by the intel_power_domains_enable()/disable() calls. The purpose
 * of this function is to keep the rest of the power wells enabled until the
 * end of display HW readout (which will acquire the power references
 * reflecting the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the ondemand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	drm_WARN_ON(&i915->drm, power_domains->wakeref);
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}
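/*
 * Illustrative sketch (not driver code) of the system suspend/resume
 * ordering implied by the pairing rules in the kernel-doc below: suspend
 * with power well toggling already disabled, and re-enable toggling only
 * after resume:
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *	... system sleeps ...
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);
 *
 * I915_DRM_SUSPEND_MEM is used here only as an example target state; the
 * kernel-doc lists idle, mem and hibernation as the possible modes.
 */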
/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * CSR/DMC firmware will stay active; it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    i915->csr.dmc_payload) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915->params.disable_power_well)
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	if (INTEL_GEN(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_CANNONLAKE(i915))
		cnl_display_core_uninit(i915);
	else if (IS_GEN9_BC(i915))
		skl_display_core_uninit(i915);
	else if (IS_GEN9_LP(i915))
		bxt_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}

/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(&i915->drm, power_domains->wakeref);
		power_domains->wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg(&i915->drm, "%-25s %d\n",
			power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			drm_dbg(&i915->drm, " %-23s %d\n",
				intel_display_power_domain_str(domain),
				power_domains->domain_use_count[domain]);
	}
}
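/*
 * Illustrative sketch (not driver code) of the two invariants that
 * intel_power_domains_verify_state() below checks for every power well:
 *
 *	// 1) The SW refcount (or always-on flag) must match the HW state:
 *	(power_well->count || power_well->desc->always_on) == is_enabled
 *
 *	// 2) The well's refcount must equal the sum of the use counts of
 *	// all the domains it belongs to:
 *	power_well->count == sum of domain_use_count[domain]
 *			     over each domain in desc->domains
 *
 * A mismatch of either indicates a HW/SW bookkeeping bug.
 */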
/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify that the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			drm_err(&i915->drm,
				"power well %s state mismatch (refcount %d/enabled %d)",
				power_well->desc->name,
				power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			drm_err(&i915->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				power_well->desc->name, power_well->count,
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif

void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
		bxt_enable_dc9(i915);
		/* Tweaked Wa_14010685332:icp,jsp,mcc */
		if (INTEL_PCH_TYPE(i915) >= PCH_ICP &&
		    INTEL_PCH_TYPE(i915) <= PCH_MCC)
			intel_de_rmw(i915, SOUTH_CHICKEN1,
				     SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
		/* Tweaked Wa_14010685332:icp,jsp,mcc */
		if (INTEL_PCH_TYPE(i915) >= PCH_ICP &&
		    INTEL_PCH_TYPE(i915) <= PCH_MCC)
			intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}

void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}
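/*
 * Illustrative sketch (not driver code) of how intel_display_power_suspend()
 * above and intel_display_power_resume() below mirror each other on gen11+:
 *
 *	suspend:	icl_display_core_uninit();	bxt_enable_dc9();
 *	resume:		bxt_disable_dc9();		icl_display_core_init();
 *			then re-enter DC5/DC6 if the DMC firmware payload is
 *			loaded and the corresponding DC state is allowed.
 *
 * The resume side must undo DC9 before re-initializing the display core,
 * i.e. the steps run in reverse order of suspend.
 */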
void intel_display_power_resume(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11) {
		bxt_disable_dc9(i915);
		icl_display_core_init(i915, true);
		if (i915->csr.dmc_payload) {
			if (i915->csr.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(i915);
			else if (i915->csr.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(i915);
		}
	} else if (IS_GEN9_LP(i915)) {
		bxt_disable_dc9(i915);
		bxt_display_core_init(i915, true);
		if (i915->csr.dmc_payload &&
		    (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}