/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
43 * 44 * Since i915 supports a diverse set of platforms with a unified codebase and 45 * hardware engineers just love to shuffle functionality around between power 46 * domains there's a sizeable amount of indirection required. This file provides 47 * generic functions to the driver for grabbing and releasing references for 48 * abstract power domains. It then maps those to the actual power wells 49 * present for a given platform. 50 */ 51 52 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 53 enum i915_power_well_id power_well_id); 54 55 const char * 56 intel_display_power_domain_str(enum intel_display_power_domain domain) 57 { 58 switch (domain) { 59 case POWER_DOMAIN_PIPE_A: 60 return "PIPE_A"; 61 case POWER_DOMAIN_PIPE_B: 62 return "PIPE_B"; 63 case POWER_DOMAIN_PIPE_C: 64 return "PIPE_C"; 65 case POWER_DOMAIN_PIPE_A_PANEL_FITTER: 66 return "PIPE_A_PANEL_FITTER"; 67 case POWER_DOMAIN_PIPE_B_PANEL_FITTER: 68 return "PIPE_B_PANEL_FITTER"; 69 case POWER_DOMAIN_PIPE_C_PANEL_FITTER: 70 return "PIPE_C_PANEL_FITTER"; 71 case POWER_DOMAIN_TRANSCODER_A: 72 return "TRANSCODER_A"; 73 case POWER_DOMAIN_TRANSCODER_B: 74 return "TRANSCODER_B"; 75 case POWER_DOMAIN_TRANSCODER_C: 76 return "TRANSCODER_C"; 77 case POWER_DOMAIN_TRANSCODER_EDP: 78 return "TRANSCODER_EDP"; 79 case POWER_DOMAIN_TRANSCODER_DSI_A: 80 return "TRANSCODER_DSI_A"; 81 case POWER_DOMAIN_TRANSCODER_DSI_C: 82 return "TRANSCODER_DSI_C"; 83 case POWER_DOMAIN_PORT_DDI_A_LANES: 84 return "PORT_DDI_A_LANES"; 85 case POWER_DOMAIN_PORT_DDI_B_LANES: 86 return "PORT_DDI_B_LANES"; 87 case POWER_DOMAIN_PORT_DDI_C_LANES: 88 return "PORT_DDI_C_LANES"; 89 case POWER_DOMAIN_PORT_DDI_D_LANES: 90 return "PORT_DDI_D_LANES"; 91 case POWER_DOMAIN_PORT_DDI_E_LANES: 92 return "PORT_DDI_E_LANES"; 93 case POWER_DOMAIN_PORT_DDI_F_LANES: 94 return "PORT_DDI_F_LANES"; 95 case POWER_DOMAIN_PORT_DDI_A_IO: 96 return "PORT_DDI_A_IO"; 97 case POWER_DOMAIN_PORT_DDI_B_IO: 98 return "PORT_DDI_B_IO"; 99 case 
POWER_DOMAIN_PORT_DDI_C_IO: 100 return "PORT_DDI_C_IO"; 101 case POWER_DOMAIN_PORT_DDI_D_IO: 102 return "PORT_DDI_D_IO"; 103 case POWER_DOMAIN_PORT_DDI_E_IO: 104 return "PORT_DDI_E_IO"; 105 case POWER_DOMAIN_PORT_DDI_F_IO: 106 return "PORT_DDI_F_IO"; 107 case POWER_DOMAIN_PORT_DSI: 108 return "PORT_DSI"; 109 case POWER_DOMAIN_PORT_CRT: 110 return "PORT_CRT"; 111 case POWER_DOMAIN_PORT_OTHER: 112 return "PORT_OTHER"; 113 case POWER_DOMAIN_VGA: 114 return "VGA"; 115 case POWER_DOMAIN_AUDIO: 116 return "AUDIO"; 117 case POWER_DOMAIN_PLLS: 118 return "PLLS"; 119 case POWER_DOMAIN_AUX_A: 120 return "AUX_A"; 121 case POWER_DOMAIN_AUX_B: 122 return "AUX_B"; 123 case POWER_DOMAIN_AUX_C: 124 return "AUX_C"; 125 case POWER_DOMAIN_AUX_D: 126 return "AUX_D"; 127 case POWER_DOMAIN_AUX_E: 128 return "AUX_E"; 129 case POWER_DOMAIN_AUX_F: 130 return "AUX_F"; 131 case POWER_DOMAIN_AUX_IO_A: 132 return "AUX_IO_A"; 133 case POWER_DOMAIN_AUX_TBT1: 134 return "AUX_TBT1"; 135 case POWER_DOMAIN_AUX_TBT2: 136 return "AUX_TBT2"; 137 case POWER_DOMAIN_AUX_TBT3: 138 return "AUX_TBT3"; 139 case POWER_DOMAIN_AUX_TBT4: 140 return "AUX_TBT4"; 141 case POWER_DOMAIN_GMBUS: 142 return "GMBUS"; 143 case POWER_DOMAIN_INIT: 144 return "INIT"; 145 case POWER_DOMAIN_MODESET: 146 return "MODESET"; 147 case POWER_DOMAIN_GT_IRQ: 148 return "GT_IRQ"; 149 default: 150 MISSING_CASE(domain); 151 return "?"; 152 } 153 } 154 155 static void intel_power_well_enable(struct drm_i915_private *dev_priv, 156 struct i915_power_well *power_well) 157 { 158 DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name); 159 power_well->desc->ops->enable(dev_priv, power_well); 160 power_well->hw_enabled = true; 161 } 162 163 static void intel_power_well_disable(struct drm_i915_private *dev_priv, 164 struct i915_power_well *power_well) 165 { 166 DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name); 167 power_well->hw_enabled = false; 168 power_well->desc->ops->disable(dev_priv, power_well); 169 } 170 171 static void 
intel_power_well_get(struct drm_i915_private *dev_priv, 172 struct i915_power_well *power_well) 173 { 174 if (!power_well->count++) 175 intel_power_well_enable(dev_priv, power_well); 176 } 177 178 static void intel_power_well_put(struct drm_i915_private *dev_priv, 179 struct i915_power_well *power_well) 180 { 181 WARN(!power_well->count, "Use count on power well %s is already zero", 182 power_well->desc->name); 183 184 if (!--power_well->count) 185 intel_power_well_disable(dev_priv, power_well); 186 } 187 188 /** 189 * __intel_display_power_is_enabled - unlocked check for a power domain 190 * @dev_priv: i915 device instance 191 * @domain: power domain to check 192 * 193 * This is the unlocked version of intel_display_power_is_enabled() and should 194 * only be used from error capture and recovery code where deadlocks are 195 * possible. 196 * 197 * Returns: 198 * True when the power domain is enabled, false otherwise. 199 */ 200 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 201 enum intel_display_power_domain domain) 202 { 203 struct i915_power_well *power_well; 204 bool is_enabled; 205 206 if (dev_priv->runtime_pm.suspended) 207 return false; 208 209 is_enabled = true; 210 211 for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) { 212 if (power_well->desc->always_on) 213 continue; 214 215 if (!power_well->hw_enabled) { 216 is_enabled = false; 217 break; 218 } 219 } 220 221 return is_enabled; 222 } 223 224 /** 225 * intel_display_power_is_enabled - check for a power domain 226 * @dev_priv: i915 device instance 227 * @domain: power domain to check 228 * 229 * This function can be used to check the hw power domain state. It is mostly 230 * used in hardware state readout functions. Everywhere else code should rely 231 * upon explicit power domain reference counting to ensure that the hardware 232 * block is powered up before accessing it. 
233 * 234 * Callers must hold the relevant modesetting locks to ensure that concurrent 235 * threads can't disable the power well while the caller tries to read a few 236 * registers. 237 * 238 * Returns: 239 * True when the power domain is enabled, false otherwise. 240 */ 241 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 242 enum intel_display_power_domain domain) 243 { 244 struct i915_power_domains *power_domains; 245 bool ret; 246 247 power_domains = &dev_priv->power_domains; 248 249 mutex_lock(&power_domains->lock); 250 ret = __intel_display_power_is_enabled(dev_priv, domain); 251 mutex_unlock(&power_domains->lock); 252 253 return ret; 254 } 255 256 /* 257 * Starting with Haswell, we have a "Power Down Well" that can be turned off 258 * when not needed anymore. We have 4 registers that can request the power well 259 * to be enabled, and it will only be disabled if none of the registers is 260 * requesting it to be enabled. 261 */ 262 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, 263 u8 irq_pipe_mask, bool has_vga) 264 { 265 struct pci_dev *pdev = dev_priv->drm.pdev; 266 267 /* 268 * After we re-enable the power well, if we touch VGA register 0x3d5 269 * we'll get unclaimed register interrupts. This stops after we write 270 * anything to the VGA MSR register. The vgacon module uses this 271 * register all the time, so if we unbind our driver and, as a 272 * consequence, bind vgacon, we'll get stuck in an infinite loop at 273 * console_unlock(). So make here we touch the VGA MSR register, making 274 * sure vgacon can keep working normally without triggering interrupts 275 * and error messages. 
276 */ 277 if (has_vga) { 278 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); 279 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); 280 vga_put(pdev, VGA_RSRC_LEGACY_IO); 281 } 282 283 if (irq_pipe_mask) 284 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); 285 } 286 287 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, 288 u8 irq_pipe_mask) 289 { 290 if (irq_pipe_mask) 291 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); 292 } 293 294 295 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, 296 struct i915_power_well *power_well) 297 { 298 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 299 int pw_idx = power_well->desc->hsw.idx; 300 301 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ 302 WARN_ON(intel_wait_for_register(dev_priv, 303 regs->driver, 304 HSW_PWR_WELL_CTL_STATE(pw_idx), 305 HSW_PWR_WELL_CTL_STATE(pw_idx), 306 1)); 307 } 308 309 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, 310 const struct i915_power_well_regs *regs, 311 int pw_idx) 312 { 313 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); 314 u32 ret; 315 316 ret = I915_READ(regs->bios) & req_mask ? 1 : 0; 317 ret |= I915_READ(regs->driver) & req_mask ? 2 : 0; 318 if (regs->kvmr.reg) 319 ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0; 320 ret |= I915_READ(regs->debug) & req_mask ? 8 : 0; 321 322 return ret; 323 } 324 325 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, 326 struct i915_power_well *power_well) 327 { 328 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 329 int pw_idx = power_well->desc->hsw.idx; 330 bool disabled; 331 u32 reqs; 332 333 /* 334 * Bspec doesn't require waiting for PWs to get disabled, but still do 335 * this for paranoia. 
The known cases where a PW will be forced on: 336 * - a KVMR request on any power well via the KVMR request register 337 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and 338 * DEBUG request registers 339 * Skip the wait in case any of the request bits are set and print a 340 * diagnostic message. 341 */ 342 wait_for((disabled = !(I915_READ(regs->driver) & 343 HSW_PWR_WELL_CTL_STATE(pw_idx))) || 344 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); 345 if (disabled) 346 return; 347 348 DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", 349 power_well->desc->name, 350 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); 351 } 352 353 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, 354 enum skl_power_gate pg) 355 { 356 /* Timeout 5us for PG#0, for other PGs 1us */ 357 WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS, 358 SKL_FUSE_PG_DIST_STATUS(pg), 359 SKL_FUSE_PG_DIST_STATUS(pg), 1)); 360 } 361 362 static void hsw_power_well_enable(struct drm_i915_private *dev_priv, 363 struct i915_power_well *power_well) 364 { 365 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 366 int pw_idx = power_well->desc->hsw.idx; 367 bool wait_fuses = power_well->desc->hsw.has_fuses; 368 enum skl_power_gate uninitialized_var(pg); 369 u32 val; 370 371 if (wait_fuses) { 372 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : 373 SKL_PW_CTL_IDX_TO_PG(pw_idx); 374 /* 375 * For PW1 we have to wait both for the PW0/PG0 fuse state 376 * before enabling the power well and PW1/PG1's own fuse 377 * state after the enabling. For all other power wells with 378 * fuses we only have to wait for that PW/PG's fuse state 379 * after the enabling. 
380 */ 381 if (pg == SKL_PG1) 382 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); 383 } 384 385 val = I915_READ(regs->driver); 386 I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); 387 hsw_wait_for_power_well_enable(dev_priv, power_well); 388 389 /* Display WA #1178: cnl */ 390 if (IS_CANNONLAKE(dev_priv) && 391 pw_idx >= GLK_PW_CTL_IDX_AUX_B && 392 pw_idx <= CNL_PW_CTL_IDX_AUX_F) { 393 val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx)); 394 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS; 395 I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val); 396 } 397 398 if (wait_fuses) 399 gen9_wait_for_power_well_fuses(dev_priv, pg); 400 401 hsw_power_well_post_enable(dev_priv, 402 power_well->desc->hsw.irq_pipe_mask, 403 power_well->desc->hsw.has_vga); 404 } 405 406 static void hsw_power_well_disable(struct drm_i915_private *dev_priv, 407 struct i915_power_well *power_well) 408 { 409 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 410 int pw_idx = power_well->desc->hsw.idx; 411 u32 val; 412 413 hsw_power_well_pre_disable(dev_priv, 414 power_well->desc->hsw.irq_pipe_mask); 415 416 val = I915_READ(regs->driver); 417 I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); 418 hsw_wait_for_power_well_disable(dev_priv, power_well); 419 } 420 421 #define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) 422 423 static void 424 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, 425 struct i915_power_well *power_well) 426 { 427 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 428 int pw_idx = power_well->desc->hsw.idx; 429 enum port port = ICL_AUX_PW_TO_PORT(pw_idx); 430 u32 val; 431 432 val = I915_READ(regs->driver); 433 I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); 434 435 val = I915_READ(ICL_PORT_CL_DW12(port)); 436 I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); 437 438 hsw_wait_for_power_well_enable(dev_priv, power_well); 439 440 /* Display WA #1178: icl */ 
441 if (IS_ICELAKE(dev_priv) && 442 pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && 443 !intel_bios_is_port_edp(dev_priv, port)) { 444 val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx)); 445 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; 446 I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val); 447 } 448 } 449 450 static void 451 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, 452 struct i915_power_well *power_well) 453 { 454 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 455 int pw_idx = power_well->desc->hsw.idx; 456 enum port port = ICL_AUX_PW_TO_PORT(pw_idx); 457 u32 val; 458 459 val = I915_READ(ICL_PORT_CL_DW12(port)); 460 I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX); 461 462 val = I915_READ(regs->driver); 463 I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); 464 465 hsw_wait_for_power_well_disable(dev_priv, power_well); 466 } 467 468 #define ICL_AUX_PW_TO_CH(pw_idx) \ 469 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) 470 471 static void 472 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, 473 struct i915_power_well *power_well) 474 { 475 enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx); 476 u32 val; 477 478 val = I915_READ(DP_AUX_CH_CTL(aux_ch)); 479 val &= ~DP_AUX_CH_CTL_TBT_IO; 480 if (power_well->desc->hsw.is_tc_tbt) 481 val |= DP_AUX_CH_CTL_TBT_IO; 482 I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); 483 484 hsw_power_well_enable(dev_priv, power_well); 485 } 486 487 /* 488 * We should only use the power well if we explicitly asked the hardware to 489 * enable it, so check if it's enabled and also check if we've requested it to 490 * be enabled. 
491 */ 492 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, 493 struct i915_power_well *power_well) 494 { 495 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 496 int pw_idx = power_well->desc->hsw.idx; 497 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | 498 HSW_PWR_WELL_CTL_STATE(pw_idx); 499 500 return (I915_READ(regs->driver) & mask) == mask; 501 } 502 503 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) 504 { 505 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), 506 "DC9 already programmed to be enabled.\n"); 507 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, 508 "DC5 still not disabled to enable DC9.\n"); 509 WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) & 510 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), 511 "Power well 2 on.\n"); 512 WARN_ONCE(intel_irqs_enabled(dev_priv), 513 "Interrupts not disabled yet.\n"); 514 515 /* 516 * TODO: check for the following to verify the conditions to enter DC9 517 * state are satisfied: 518 * 1] Check relevant display engine registers to verify if mode set 519 * disable sequence was followed. 520 * 2] Check if display uninitialize sequence is initialized. 521 */ 522 } 523 524 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) 525 { 526 WARN_ONCE(intel_irqs_enabled(dev_priv), 527 "Interrupts not disabled yet.\n"); 528 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, 529 "DC5 still not disabled.\n"); 530 531 /* 532 * TODO: check for the following to verify DC9 state was indeed 533 * entered before programming to disable it: 534 * 1] Check relevant display engine registers to verify if mode 535 * set disable sequence was followed. 536 * 2] Check if display uninitialize sequence is initialized. 
537 */ 538 } 539 540 static void gen9_write_dc_state(struct drm_i915_private *dev_priv, 541 u32 state) 542 { 543 int rewrites = 0; 544 int rereads = 0; 545 u32 v; 546 547 I915_WRITE(DC_STATE_EN, state); 548 549 /* It has been observed that disabling the dc6 state sometimes 550 * doesn't stick and dmc keeps returning old value. Make sure 551 * the write really sticks enough times and also force rewrite until 552 * we are confident that state is exactly what we want. 553 */ 554 do { 555 v = I915_READ(DC_STATE_EN); 556 557 if (v != state) { 558 I915_WRITE(DC_STATE_EN, state); 559 rewrites++; 560 rereads = 0; 561 } else if (rereads++ > 5) { 562 break; 563 } 564 565 } while (rewrites < 100); 566 567 if (v != state) 568 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n", 569 state, v); 570 571 /* Most of the times we need one retry, avoid spam */ 572 if (rewrites > 1) 573 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n", 574 state, rewrites); 575 } 576 577 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) 578 { 579 u32 mask; 580 581 mask = DC_STATE_EN_UPTO_DC5; 582 if (INTEL_GEN(dev_priv) >= 11) 583 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; 584 else if (IS_GEN9_LP(dev_priv)) 585 mask |= DC_STATE_EN_DC9; 586 else 587 mask |= DC_STATE_EN_UPTO_DC6; 588 589 return mask; 590 } 591 592 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) 593 { 594 u32 val; 595 596 val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv); 597 598 DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n", 599 dev_priv->csr.dc_state, val); 600 dev_priv->csr.dc_state = val; 601 } 602 603 /** 604 * gen9_set_dc_state - set target display C power state 605 * @dev_priv: i915 device instance 606 * @state: target DC power state 607 * - DC_STATE_DISABLE 608 * - DC_STATE_EN_UPTO_DC5 609 * - DC_STATE_EN_UPTO_DC6 610 * - DC_STATE_EN_DC9 611 * 612 * Signal to DMC firmware/HW the target DC power state passed in @state. 
613 * DMC/HW can turn off individual display clocks and power rails when entering 614 * a deeper DC power state (higher in number) and turns these back when exiting 615 * that state to a shallower power state (lower in number). The HW will decide 616 * when to actually enter a given state on an on-demand basis, for instance 617 * depending on the active state of display pipes. The state of display 618 * registers backed by affected power rails are saved/restored as needed. 619 * 620 * Based on the above enabling a deeper DC power state is asynchronous wrt. 621 * enabling it. Disabling a deeper power state is synchronous: for instance 622 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned 623 * back on and register state is restored. This is guaranteed by the MMIO write 624 * to DC_STATE_EN blocking until the state is restored. 625 */ 626 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) 627 { 628 uint32_t val; 629 uint32_t mask; 630 631 if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask)) 632 state &= dev_priv->csr.allowed_dc_mask; 633 634 val = I915_READ(DC_STATE_EN); 635 mask = gen9_dc_mask(dev_priv); 636 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", 637 val & mask, state); 638 639 /* Check if DMC is ignoring our DC state requests */ 640 if ((val & mask) != dev_priv->csr.dc_state) 641 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n", 642 dev_priv->csr.dc_state, val & mask); 643 644 val &= ~mask; 645 val |= state; 646 647 gen9_write_dc_state(dev_priv, val); 648 649 dev_priv->csr.dc_state = val & mask; 650 } 651 652 void bxt_enable_dc9(struct drm_i915_private *dev_priv) 653 { 654 assert_can_enable_dc9(dev_priv); 655 656 DRM_DEBUG_KMS("Enabling DC9\n"); 657 /* 658 * Power sequencer reset is not needed on 659 * platforms with South Display Engine on PCH, 660 * because PPS registers are always on. 
661 */ 662 if (!HAS_PCH_SPLIT(dev_priv)) 663 intel_power_sequencer_reset(dev_priv); 664 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); 665 } 666 667 void bxt_disable_dc9(struct drm_i915_private *dev_priv) 668 { 669 assert_can_disable_dc9(dev_priv); 670 671 DRM_DEBUG_KMS("Disabling DC9\n"); 672 673 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 674 675 intel_pps_unlock_regs_wa(dev_priv); 676 } 677 678 static void assert_csr_loaded(struct drm_i915_private *dev_priv) 679 { 680 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)), 681 "CSR program storage start is NULL\n"); 682 WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n"); 683 WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n"); 684 } 685 686 static struct i915_power_well * 687 lookup_power_well(struct drm_i915_private *dev_priv, 688 enum i915_power_well_id power_well_id) 689 { 690 struct i915_power_well *power_well; 691 692 for_each_power_well(dev_priv, power_well) 693 if (power_well->desc->id == power_well_id) 694 return power_well; 695 696 /* 697 * It's not feasible to add error checking code to the callers since 698 * this condition really shouldn't happen and it doesn't even make sense 699 * to abort things like display initialization sequences. Just return 700 * the first power well and hope the WARN gets reported so we can fix 701 * our driver. 
702 */ 703 WARN(1, "Power well %d not defined for this platform\n", power_well_id); 704 return &dev_priv->power_domains.power_wells[0]; 705 } 706 707 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) 708 { 709 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, 710 SKL_DISP_PW_2); 711 712 WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n"); 713 714 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), 715 "DC5 already programmed to be enabled.\n"); 716 assert_rpm_wakelock_held(dev_priv); 717 718 assert_csr_loaded(dev_priv); 719 } 720 721 void gen9_enable_dc5(struct drm_i915_private *dev_priv) 722 { 723 assert_can_enable_dc5(dev_priv); 724 725 DRM_DEBUG_KMS("Enabling DC5\n"); 726 727 /* Wa Display #1183: skl,kbl,cfl */ 728 if (IS_GEN9_BC(dev_priv)) 729 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | 730 SKL_SELECT_ALTERNATE_DC_EXIT); 731 732 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); 733 } 734 735 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) 736 { 737 WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 738 "Backlight is not disabled.\n"); 739 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), 740 "DC6 already programmed to be enabled.\n"); 741 742 assert_csr_loaded(dev_priv); 743 } 744 745 void skl_enable_dc6(struct drm_i915_private *dev_priv) 746 { 747 assert_can_enable_dc6(dev_priv); 748 749 DRM_DEBUG_KMS("Enabling DC6\n"); 750 751 /* Wa Display #1183: skl,kbl,cfl */ 752 if (IS_GEN9_BC(dev_priv)) 753 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | 754 SKL_SELECT_ALTERNATE_DC_EXIT); 755 756 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 757 } 758 759 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, 760 struct i915_power_well *power_well) 761 { 762 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 763 int pw_idx = power_well->desc->hsw.idx; 764 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); 765 u32 bios_req = 
I915_READ(regs->bios); 766 767 /* Take over the request bit if set by BIOS. */ 768 if (bios_req & mask) { 769 u32 drv_req = I915_READ(regs->driver); 770 771 if (!(drv_req & mask)) 772 I915_WRITE(regs->driver, drv_req | mask); 773 I915_WRITE(regs->bios, bios_req & ~mask); 774 } 775 } 776 777 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 778 struct i915_power_well *power_well) 779 { 780 bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy); 781 } 782 783 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 784 struct i915_power_well *power_well) 785 { 786 bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy); 787 } 788 789 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, 790 struct i915_power_well *power_well) 791 { 792 return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy); 793 } 794 795 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) 796 { 797 struct i915_power_well *power_well; 798 799 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); 800 if (power_well->count > 0) 801 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); 802 803 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 804 if (power_well->count > 0) 805 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); 806 807 if (IS_GEMINILAKE(dev_priv)) { 808 power_well = lookup_power_well(dev_priv, 809 GLK_DISP_PW_DPIO_CMN_C); 810 if (power_well->count > 0) 811 bxt_ddi_phy_verify_state(dev_priv, 812 power_well->desc->bxt.phy); 813 } 814 } 815 816 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, 817 struct i915_power_well *power_well) 818 { 819 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0; 820 } 821 822 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) 823 { 824 u32 tmp = I915_READ(DBUF_CTL); 825 826 WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) != 827 
(DBUF_POWER_STATE | DBUF_POWER_REQUEST), 828 "Unexpected DBuf power power state (0x%08x)\n", tmp); 829 } 830 831 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, 832 struct i915_power_well *power_well) 833 { 834 struct intel_cdclk_state cdclk_state = {}; 835 836 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 837 838 dev_priv->display.get_cdclk(dev_priv, &cdclk_state); 839 /* Can't read out voltage_level so can't use intel_cdclk_changed() */ 840 WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state)); 841 842 gen9_assert_dbuf_enabled(dev_priv); 843 844 if (IS_GEN9_LP(dev_priv)) 845 bxt_verify_ddi_phy_power_wells(dev_priv); 846 } 847 848 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, 849 struct i915_power_well *power_well) 850 { 851 if (!dev_priv->csr.dmc_payload) 852 return; 853 854 if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6) 855 skl_enable_dc6(dev_priv); 856 else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) 857 gen9_enable_dc5(dev_priv); 858 } 859 860 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, 861 struct i915_power_well *power_well) 862 { 863 } 864 865 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, 866 struct i915_power_well *power_well) 867 { 868 } 869 870 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, 871 struct i915_power_well *power_well) 872 { 873 return true; 874 } 875 876 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, 877 struct i915_power_well *power_well) 878 { 879 if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) 880 i830_enable_pipe(dev_priv, PIPE_A); 881 if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) 882 i830_enable_pipe(dev_priv, PIPE_B); 883 } 884 885 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, 886 struct i915_power_well *power_well) 887 { 888 i830_disable_pipe(dev_priv, 
PIPE_B); 889 i830_disable_pipe(dev_priv, PIPE_A); 890 } 891 892 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, 893 struct i915_power_well *power_well) 894 { 895 return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE && 896 I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE; 897 } 898 899 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, 900 struct i915_power_well *power_well) 901 { 902 if (power_well->count > 0) 903 i830_pipes_power_well_enable(dev_priv, power_well); 904 else 905 i830_pipes_power_well_disable(dev_priv, power_well); 906 } 907 908 static void vlv_set_power_well(struct drm_i915_private *dev_priv, 909 struct i915_power_well *power_well, bool enable) 910 { 911 int pw_idx = power_well->desc->vlv.idx; 912 u32 mask; 913 u32 state; 914 u32 ctrl; 915 916 mask = PUNIT_PWRGT_MASK(pw_idx); 917 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) : 918 PUNIT_PWRGT_PWR_GATE(pw_idx); 919 920 mutex_lock(&dev_priv->pcu_lock); 921 922 #define COND \ 923 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) 924 925 if (COND) 926 goto out; 927 928 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); 929 ctrl &= ~mask; 930 ctrl |= state; 931 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); 932 933 if (wait_for(COND, 100)) 934 DRM_ERROR("timeout setting power well state %08x (%08x)\n", 935 state, 936 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); 937 938 #undef COND 939 940 out: 941 mutex_unlock(&dev_priv->pcu_lock); 942 } 943 944 static void vlv_power_well_enable(struct drm_i915_private *dev_priv, 945 struct i915_power_well *power_well) 946 { 947 vlv_set_power_well(dev_priv, power_well, true); 948 } 949 950 static void vlv_power_well_disable(struct drm_i915_private *dev_priv, 951 struct i915_power_well *power_well) 952 { 953 vlv_set_power_well(dev_priv, power_well, false); 954 } 955 956 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, 957 struct i915_power_well *power_well) 958 { 959 
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}

/*
 * Display clock gating setup for VLV/CHV, run every time the display
 * power well comes back up (see vlv_display_power_well_init()).
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	/* Keep only the DPOUNIT gating-disable bit, then disable VRHUNIT gating. */
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	/* rawclk_freq must have been determined before the well is enabled. */
	WARN_ON(dev_priv->rawclk_freq == 0);

	/* NOTE(review): assumes rawclk_freq is in kHz (register takes units of 1000) — confirm */
	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

/*
 * Common enable path for the VLV display power well (also used by the
 * CHV pipe-A well): set up DPLL ref/CRI clocks, clock gating and
 * display IRQs, then — outside of driver init — restore hotplug, CRT,
 * VGA and panel power sequencer state.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

/*
 * Counterpart of vlv_display_power_well_init(): quiesce display IRQs,
 * reset the panel power sequencer bookkeeping and re-arm HPD polling
 * (unless we're in late suspend) before the well goes down.
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

/*
 * Enable the VLV DPIO common lane well, then de-assert cmn_reset.
 * The ref/CRI clock must already be running (see the init path above).
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.
	 *		The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

/*
 * Disable the VLV DPIO common lane well: assert common lane reset,
 * then gate the well. All pipe PLLs must already be disabled.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

/* Mask covering every defined power domain bit. */
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

/* True iff all of @bits are set in @val. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

/*
 * CHV only: sanity-check DISPLAY_PHY_STATUS against the state we
 * believe we programmed via dev_priv->chv_phy_control. Logs an error
 * on mismatch; does not change any power state itself.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	/* PHY0 (channels B and C) expectations. */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* Spline LDOs: lane pairs 0/1 (0x3) and 2/3 (0xc) per channel. */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	/* PHY1 (channel D) expectations; this PHY has a single channel. */
	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

/*
 * Enable a CHV DPIO common lane well (BC -> PHY0, D -> PHY1): power it
 * up via the Punit, wait for phypwrgood, enable dynamic power down in
 * the PHY, then de-assert the common lane reset.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	/* The pipe here selects the sideband port for vlv_dpio_read/write. */
	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

/*
 * Disable a CHV DPIO common lane well: assert the common lane reset
 * (PLLs must already be off), gate the well, and from now on allow the
 * PHY state asserts since the PHY has seen a full reset.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

/*
 * Verify the per-lane power down status in the DPIO common lane
 * registers matches what @override/@mask imply. Warns on mismatch;
 * no hardware state is changed.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie.
	 * the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* Some lanes enabled: only "any lane down" should be set. */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

/*
 * Set or clear the power down override enable for one PHY channel.
 * Returns the previous override state so callers can restore it.
 * Serialized by the power domains lock.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

/*
 * Program the per-lane power down override mask (and its enable bit)
 * for the PHY channel driving @encoder, then cross-check the resulting
 * PHY state.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

/*
 * Report whether the CHV pipe-A power well is on, based on the Punit
 * DSPFREQ status bits. Warns on states we never program and on a
 * status/control mismatch (someone else poking at the controls).
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	/* Control (SSC) bits sit 16 below the status (SSS) bits. */
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}

/*
 * Request a CHV pipe-A power state change through the Punit DSPFREQ
 * register and wait (up to 100 ms) for the status bits to follow.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->pcu_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	/* Nothing to do if the well is already in the requested state. */
	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ?
		DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}

/* CHV pipe-A well shares the display (de)init path with VLV. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

/*
 * Take a reference on every power well covering @domain and bump the
 * domain use count. Caller must hold power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	/* Hold a runtime PM wakeref for as long as the domain reference lives. */
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if both the
 * device (runtime PM wise) and the domain are already enabled; it never powers
 * anything up itself. If either check fails no reference is taken.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns: true if the reference was taken, false otherwise.
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool is_enabled;

	/* Don't wake the device: bail unless it is already in use. */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	/* Drop the speculative runtime PM reference if we took no domain ref. */
	if (!is_enabled)
		intel_runtime_pm_put(dev_priv);

	return is_enabled;
}

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the
 * corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	/* Catch unbalanced put() calls before the count underflows. */
	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	/* Drop well references in reverse order of how get() took them. */
	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_put(dev_priv, power_well);

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}

/*
 * Per-platform power well -> power domain mappings. Each mask lists
 * the domains a given well must stay up for.
 */
#define I830_PIPES_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* VLV: per lane-pair TX wells for DDI B and C. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* CHV */
#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* HSW/BDW */
#define HSW_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* SKL */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* BXT */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* GLK */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* CNL */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/*
 * ICL PW_0/PG_0 domains (HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - central power except FBC
 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
 * ICL PW_1/PG_1 domains (HW/DMC control):
 * - DBUF function
 * - PIPE_A and its planes, except VGA
 * - transcoder EDP + PSR
 * - transcoder DSI
 * - DDI_A
 * - FBC
 */
#define ICL_PW_4_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/* VDSC/joining */
#define ICL_PW_3_POWER_DOMAINS (			\
	ICL_PW_4_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 1984 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 1985 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 1986 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 1987 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 1988 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ 1989 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 1990 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \ 1991 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 1992 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 1993 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 1994 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 1995 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 1996 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 1997 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 1998 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 1999 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 2000 BIT_ULL(POWER_DOMAIN_VGA) | \ 2001 BIT_ULL(POWER_DOMAIN_AUDIO) | \ 2002 BIT_ULL(POWER_DOMAIN_INIT)) 2003 /* 2004 * - transcoder WD 2005 * - KVMR (HW control) 2006 */ 2007 #define ICL_PW_2_POWER_DOMAINS ( \ 2008 ICL_PW_3_POWER_DOMAINS | \ 2009 BIT_ULL(POWER_DOMAIN_INIT)) 2010 /* 2011 * - eDP/DSI VDSC 2012 * - KVMR (HW control) 2013 */ 2014 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2015 ICL_PW_2_POWER_DOMAINS | \ 2016 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2017 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2018 BIT_ULL(POWER_DOMAIN_INIT)) 2019 2020 #define ICL_DDI_IO_A_POWER_DOMAINS ( \ 2021 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2022 #define ICL_DDI_IO_B_POWER_DOMAINS ( \ 2023 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2024 #define ICL_DDI_IO_C_POWER_DOMAINS ( \ 2025 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2026 #define ICL_DDI_IO_D_POWER_DOMAINS ( \ 2027 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2028 #define ICL_DDI_IO_E_POWER_DOMAINS ( \ 2029 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2030 #define ICL_DDI_IO_F_POWER_DOMAINS ( \ 2031 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2032 2033 #define ICL_AUX_A_IO_POWER_DOMAINS ( \ 2034 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2035 BIT_ULL(POWER_DOMAIN_AUX_A)) 2036 #define ICL_AUX_B_IO_POWER_DOMAINS ( \ 2037 BIT_ULL(POWER_DOMAIN_AUX_B)) 2038 #define 
ICL_AUX_C_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_F))
/* AUX_TBT1..4: the Thunderbolt-mode AUX channel domains on ICL. */
#define ICL_AUX_TBT1_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
#define ICL_AUX_TBT2_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
#define ICL_AUX_TBT3_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
#define ICL_AUX_TBT4_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4))

/*
 * Ops vtable for wells that are always powered: enable/disable are no-op
 * callbacks and sync_hw is the shared no-op helper.
 */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV per-pipe power well control (CHV-specific callbacks throughout). */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/*
 * CHV DPIO common-lane well: CHV-specific enable/disable paths, but the
 * status query reuses the VLV punit-based helper.
 */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/*
 * Fallback descriptor list: a single always-on well covering every power
 * domain (used by intel_power_domains_init() for platforms with no
 * dedicated power well hardware description below).
 */
static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};

/* I830: both pipes are handled as one "pipes" pseudo power well. */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};

/* HSW-style wells driven through the HSW_PWR_WELL_CTL* request registers. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* Virtual "DC off" well: enabling it blocks DC5/DC6 entry (gen9+). */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

/*
 * Register quadruple used by HSW-style wells: separate request/state
 * registers for BIOS, driver, KVMr and debug agents.
 */
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios = HSW_PWR_WELL_CTL1,
	.driver = HSW_PWR_WELL_CTL2,
	.kvmr = HSW_PWR_WELL_CTL3,
	.debug = HSW_PWR_WELL_CTL4,
};

/*
 * NOTE: descriptor order matters — wells are enabled from lower to higher
 * index and disabled in reverse (see intel_power_domains_init()).
 */
static const struct i915_power_well_desc hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			/* On BDW pipe B/C interrupts live in this well. */
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	/*
	 * All four DPIO TX lane wells advertise the same combined domain
	 * mask: any lane use keeps every TX well referenced.
	 */
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};

static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};

/*
 * Look up the power well bound to @power_well_id and report what that
 * well's own ->is_enabled() hook says about its current HW state.
 *
 * NOTE(review): no NULL check on the lookup result — assumes
 * lookup_power_well() always returns a valid well for a valid id; confirm
 * against its definition before passing ids not present on the platform.
 */
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops =
&hsw_power_well_ops, 2369 .id = SKL_DISP_PW_2, 2370 { 2371 .hsw.regs = &hsw_power_well_regs, 2372 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 2373 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 2374 .hsw.has_vga = true, 2375 .hsw.has_fuses = true, 2376 }, 2377 }, 2378 { 2379 .name = "DDI A/E IO power well", 2380 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, 2381 .ops = &hsw_power_well_ops, 2382 .id = DISP_PW_ID_NONE, 2383 { 2384 .hsw.regs = &hsw_power_well_regs, 2385 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, 2386 }, 2387 }, 2388 { 2389 .name = "DDI B IO power well", 2390 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, 2391 .ops = &hsw_power_well_ops, 2392 .id = DISP_PW_ID_NONE, 2393 { 2394 .hsw.regs = &hsw_power_well_regs, 2395 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 2396 }, 2397 }, 2398 { 2399 .name = "DDI C IO power well", 2400 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, 2401 .ops = &hsw_power_well_ops, 2402 .id = DISP_PW_ID_NONE, 2403 { 2404 .hsw.regs = &hsw_power_well_regs, 2405 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 2406 }, 2407 }, 2408 { 2409 .name = "DDI D IO power well", 2410 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, 2411 .ops = &hsw_power_well_ops, 2412 .id = DISP_PW_ID_NONE, 2413 { 2414 .hsw.regs = &hsw_power_well_regs, 2415 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 2416 }, 2417 }, 2418 }; 2419 2420 static const struct i915_power_well_desc bxt_power_wells[] = { 2421 { 2422 .name = "always-on", 2423 .always_on = 1, 2424 .domains = POWER_DOMAIN_MASK, 2425 .ops = &i9xx_always_on_power_well_ops, 2426 .id = DISP_PW_ID_NONE, 2427 }, 2428 { 2429 .name = "power well 1", 2430 .domains = 0, 2431 .ops = &hsw_power_well_ops, 2432 .id = SKL_DISP_PW_1, 2433 { 2434 .hsw.regs = &hsw_power_well_regs, 2435 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 2436 .hsw.has_fuses = true, 2437 }, 2438 }, 2439 { 2440 .name = "DC off", 2441 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, 2442 .ops = &gen9_dc_off_power_well_ops, 2443 .id = DISP_PW_ID_NONE, 2444 }, 2445 { 2446 .name = "power well 2", 2447 .domains = 
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, 2448 .ops = &hsw_power_well_ops, 2449 .id = SKL_DISP_PW_2, 2450 { 2451 .hsw.regs = &hsw_power_well_regs, 2452 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 2453 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 2454 .hsw.has_vga = true, 2455 .hsw.has_fuses = true, 2456 }, 2457 }, 2458 { 2459 .name = "dpio-common-a", 2460 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, 2461 .ops = &bxt_dpio_cmn_power_well_ops, 2462 .id = BXT_DISP_PW_DPIO_CMN_A, 2463 { 2464 .bxt.phy = DPIO_PHY1, 2465 }, 2466 }, 2467 { 2468 .name = "dpio-common-bc", 2469 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, 2470 .ops = &bxt_dpio_cmn_power_well_ops, 2471 .id = VLV_DISP_PW_DPIO_CMN_BC, 2472 { 2473 .bxt.phy = DPIO_PHY0, 2474 }, 2475 }, 2476 }; 2477 2478 static const struct i915_power_well_desc glk_power_wells[] = { 2479 { 2480 .name = "always-on", 2481 .always_on = 1, 2482 .domains = POWER_DOMAIN_MASK, 2483 .ops = &i9xx_always_on_power_well_ops, 2484 .id = DISP_PW_ID_NONE, 2485 }, 2486 { 2487 .name = "power well 1", 2488 /* Handled by the DMC firmware */ 2489 .domains = 0, 2490 .ops = &hsw_power_well_ops, 2491 .id = SKL_DISP_PW_1, 2492 { 2493 .hsw.regs = &hsw_power_well_regs, 2494 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 2495 .hsw.has_fuses = true, 2496 }, 2497 }, 2498 { 2499 .name = "DC off", 2500 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, 2501 .ops = &gen9_dc_off_power_well_ops, 2502 .id = DISP_PW_ID_NONE, 2503 }, 2504 { 2505 .name = "power well 2", 2506 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, 2507 .ops = &hsw_power_well_ops, 2508 .id = SKL_DISP_PW_2, 2509 { 2510 .hsw.regs = &hsw_power_well_regs, 2511 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 2512 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 2513 .hsw.has_vga = true, 2514 .hsw.has_fuses = true, 2515 }, 2516 }, 2517 { 2518 .name = "dpio-common-a", 2519 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, 2520 .ops = &bxt_dpio_cmn_power_well_ops, 2521 .id = BXT_DISP_PW_DPIO_CMN_A, 2522 { 2523 .bxt.phy = DPIO_PHY1, 2524 }, 2525 }, 2526 { 2527 .name = 
"dpio-common-b", 2528 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, 2529 .ops = &bxt_dpio_cmn_power_well_ops, 2530 .id = VLV_DISP_PW_DPIO_CMN_BC, 2531 { 2532 .bxt.phy = DPIO_PHY0, 2533 }, 2534 }, 2535 { 2536 .name = "dpio-common-c", 2537 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, 2538 .ops = &bxt_dpio_cmn_power_well_ops, 2539 .id = GLK_DISP_PW_DPIO_CMN_C, 2540 { 2541 .bxt.phy = DPIO_PHY2, 2542 }, 2543 }, 2544 { 2545 .name = "AUX A", 2546 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, 2547 .ops = &hsw_power_well_ops, 2548 .id = DISP_PW_ID_NONE, 2549 { 2550 .hsw.regs = &hsw_power_well_regs, 2551 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 2552 }, 2553 }, 2554 { 2555 .name = "AUX B", 2556 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, 2557 .ops = &hsw_power_well_ops, 2558 .id = DISP_PW_ID_NONE, 2559 { 2560 .hsw.regs = &hsw_power_well_regs, 2561 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 2562 }, 2563 }, 2564 { 2565 .name = "AUX C", 2566 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, 2567 .ops = &hsw_power_well_ops, 2568 .id = DISP_PW_ID_NONE, 2569 { 2570 .hsw.regs = &hsw_power_well_regs, 2571 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 2572 }, 2573 }, 2574 { 2575 .name = "DDI A IO power well", 2576 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, 2577 .ops = &hsw_power_well_ops, 2578 .id = DISP_PW_ID_NONE, 2579 { 2580 .hsw.regs = &hsw_power_well_regs, 2581 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 2582 }, 2583 }, 2584 { 2585 .name = "DDI B IO power well", 2586 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, 2587 .ops = &hsw_power_well_ops, 2588 .id = DISP_PW_ID_NONE, 2589 { 2590 .hsw.regs = &hsw_power_well_regs, 2591 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 2592 }, 2593 }, 2594 { 2595 .name = "DDI C IO power well", 2596 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, 2597 .ops = &hsw_power_well_ops, 2598 .id = DISP_PW_ID_NONE, 2599 { 2600 .hsw.regs = &hsw_power_well_regs, 2601 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 2602 }, 2603 }, 2604 }; 2605 2606 static const struct i915_power_well_desc cnl_power_wells[] = { 2607 { 2608 .name = "always-on", 
2609 .always_on = 1, 2610 .domains = POWER_DOMAIN_MASK, 2611 .ops = &i9xx_always_on_power_well_ops, 2612 .id = DISP_PW_ID_NONE, 2613 }, 2614 { 2615 .name = "power well 1", 2616 /* Handled by the DMC firmware */ 2617 .domains = 0, 2618 .ops = &hsw_power_well_ops, 2619 .id = SKL_DISP_PW_1, 2620 { 2621 .hsw.regs = &hsw_power_well_regs, 2622 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 2623 .hsw.has_fuses = true, 2624 }, 2625 }, 2626 { 2627 .name = "AUX A", 2628 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS, 2629 .ops = &hsw_power_well_ops, 2630 .id = DISP_PW_ID_NONE, 2631 { 2632 .hsw.regs = &hsw_power_well_regs, 2633 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 2634 }, 2635 }, 2636 { 2637 .name = "AUX B", 2638 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS, 2639 .ops = &hsw_power_well_ops, 2640 .id = DISP_PW_ID_NONE, 2641 { 2642 .hsw.regs = &hsw_power_well_regs, 2643 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 2644 }, 2645 }, 2646 { 2647 .name = "AUX C", 2648 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS, 2649 .ops = &hsw_power_well_ops, 2650 .id = DISP_PW_ID_NONE, 2651 { 2652 .hsw.regs = &hsw_power_well_regs, 2653 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 2654 }, 2655 }, 2656 { 2657 .name = "AUX D", 2658 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS, 2659 .ops = &hsw_power_well_ops, 2660 .id = DISP_PW_ID_NONE, 2661 { 2662 .hsw.regs = &hsw_power_well_regs, 2663 .hsw.idx = CNL_PW_CTL_IDX_AUX_D, 2664 }, 2665 }, 2666 { 2667 .name = "DC off", 2668 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, 2669 .ops = &gen9_dc_off_power_well_ops, 2670 .id = DISP_PW_ID_NONE, 2671 }, 2672 { 2673 .name = "power well 2", 2674 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 2675 .ops = &hsw_power_well_ops, 2676 .id = SKL_DISP_PW_2, 2677 { 2678 .hsw.regs = &hsw_power_well_regs, 2679 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 2680 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 2681 .hsw.has_vga = true, 2682 .hsw.has_fuses = true, 2683 }, 2684 }, 2685 { 2686 .name = "DDI A IO power well", 2687 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS, 2688 .ops = 
&hsw_power_well_ops, 2689 .id = DISP_PW_ID_NONE, 2690 { 2691 .hsw.regs = &hsw_power_well_regs, 2692 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 2693 }, 2694 }, 2695 { 2696 .name = "DDI B IO power well", 2697 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS, 2698 .ops = &hsw_power_well_ops, 2699 .id = DISP_PW_ID_NONE, 2700 { 2701 .hsw.regs = &hsw_power_well_regs, 2702 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 2703 }, 2704 }, 2705 { 2706 .name = "DDI C IO power well", 2707 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS, 2708 .ops = &hsw_power_well_ops, 2709 .id = DISP_PW_ID_NONE, 2710 { 2711 .hsw.regs = &hsw_power_well_regs, 2712 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 2713 }, 2714 }, 2715 { 2716 .name = "DDI D IO power well", 2717 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS, 2718 .ops = &hsw_power_well_ops, 2719 .id = DISP_PW_ID_NONE, 2720 { 2721 .hsw.regs = &hsw_power_well_regs, 2722 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 2723 }, 2724 }, 2725 { 2726 .name = "DDI F IO power well", 2727 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS, 2728 .ops = &hsw_power_well_ops, 2729 .id = DISP_PW_ID_NONE, 2730 { 2731 .hsw.regs = &hsw_power_well_regs, 2732 .hsw.idx = CNL_PW_CTL_IDX_DDI_F, 2733 }, 2734 }, 2735 { 2736 .name = "AUX F", 2737 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS, 2738 .ops = &hsw_power_well_ops, 2739 .id = DISP_PW_ID_NONE, 2740 { 2741 .hsw.regs = &hsw_power_well_regs, 2742 .hsw.idx = CNL_PW_CTL_IDX_AUX_F, 2743 }, 2744 }, 2745 }; 2746 2747 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { 2748 .sync_hw = hsw_power_well_sync_hw, 2749 .enable = icl_combo_phy_aux_power_well_enable, 2750 .disable = icl_combo_phy_aux_power_well_disable, 2751 .is_enabled = hsw_power_well_enabled, 2752 }; 2753 2754 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = { 2755 .sync_hw = hsw_power_well_sync_hw, 2756 .enable = icl_tc_phy_aux_power_well_enable, 2757 .disable = hsw_power_well_disable, 2758 .is_enabled = hsw_power_well_enabled, 2759 }; 2760 2761 static const struct 
i915_power_well_regs icl_aux_power_well_regs = { 2762 .bios = ICL_PWR_WELL_CTL_AUX1, 2763 .driver = ICL_PWR_WELL_CTL_AUX2, 2764 .debug = ICL_PWR_WELL_CTL_AUX4, 2765 }; 2766 2767 static const struct i915_power_well_regs icl_ddi_power_well_regs = { 2768 .bios = ICL_PWR_WELL_CTL_DDI1, 2769 .driver = ICL_PWR_WELL_CTL_DDI2, 2770 .debug = ICL_PWR_WELL_CTL_DDI4, 2771 }; 2772 2773 static const struct i915_power_well_desc icl_power_wells[] = { 2774 { 2775 .name = "always-on", 2776 .always_on = 1, 2777 .domains = POWER_DOMAIN_MASK, 2778 .ops = &i9xx_always_on_power_well_ops, 2779 .id = DISP_PW_ID_NONE, 2780 }, 2781 { 2782 .name = "power well 1", 2783 /* Handled by the DMC firmware */ 2784 .domains = 0, 2785 .ops = &hsw_power_well_ops, 2786 .id = SKL_DISP_PW_1, 2787 { 2788 .hsw.regs = &hsw_power_well_regs, 2789 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 2790 .hsw.has_fuses = true, 2791 }, 2792 }, 2793 { 2794 .name = "power well 2", 2795 .domains = ICL_PW_2_POWER_DOMAINS, 2796 .ops = &hsw_power_well_ops, 2797 .id = SKL_DISP_PW_2, 2798 { 2799 .hsw.regs = &hsw_power_well_regs, 2800 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 2801 .hsw.has_fuses = true, 2802 }, 2803 }, 2804 { 2805 .name = "DC off", 2806 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, 2807 .ops = &gen9_dc_off_power_well_ops, 2808 .id = DISP_PW_ID_NONE, 2809 }, 2810 { 2811 .name = "power well 3", 2812 .domains = ICL_PW_3_POWER_DOMAINS, 2813 .ops = &hsw_power_well_ops, 2814 .id = DISP_PW_ID_NONE, 2815 { 2816 .hsw.regs = &hsw_power_well_regs, 2817 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 2818 .hsw.irq_pipe_mask = BIT(PIPE_B), 2819 .hsw.has_vga = true, 2820 .hsw.has_fuses = true, 2821 }, 2822 }, 2823 { 2824 .name = "DDI A IO", 2825 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 2826 .ops = &hsw_power_well_ops, 2827 .id = DISP_PW_ID_NONE, 2828 { 2829 .hsw.regs = &icl_ddi_power_well_regs, 2830 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 2831 }, 2832 }, 2833 { 2834 .name = "DDI B IO", 2835 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 2836 .ops = &hsw_power_well_ops, 2837 .id = 
DISP_PW_ID_NONE, 2838 { 2839 .hsw.regs = &icl_ddi_power_well_regs, 2840 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 2841 }, 2842 }, 2843 { 2844 .name = "DDI C IO", 2845 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 2846 .ops = &hsw_power_well_ops, 2847 .id = DISP_PW_ID_NONE, 2848 { 2849 .hsw.regs = &icl_ddi_power_well_regs, 2850 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 2851 }, 2852 }, 2853 { 2854 .name = "DDI D IO", 2855 .domains = ICL_DDI_IO_D_POWER_DOMAINS, 2856 .ops = &hsw_power_well_ops, 2857 .id = DISP_PW_ID_NONE, 2858 { 2859 .hsw.regs = &icl_ddi_power_well_regs, 2860 .hsw.idx = ICL_PW_CTL_IDX_DDI_D, 2861 }, 2862 }, 2863 { 2864 .name = "DDI E IO", 2865 .domains = ICL_DDI_IO_E_POWER_DOMAINS, 2866 .ops = &hsw_power_well_ops, 2867 .id = DISP_PW_ID_NONE, 2868 { 2869 .hsw.regs = &icl_ddi_power_well_regs, 2870 .hsw.idx = ICL_PW_CTL_IDX_DDI_E, 2871 }, 2872 }, 2873 { 2874 .name = "DDI F IO", 2875 .domains = ICL_DDI_IO_F_POWER_DOMAINS, 2876 .ops = &hsw_power_well_ops, 2877 .id = DISP_PW_ID_NONE, 2878 { 2879 .hsw.regs = &icl_ddi_power_well_regs, 2880 .hsw.idx = ICL_PW_CTL_IDX_DDI_F, 2881 }, 2882 }, 2883 { 2884 .name = "AUX A", 2885 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 2886 .ops = &icl_combo_phy_aux_power_well_ops, 2887 .id = DISP_PW_ID_NONE, 2888 { 2889 .hsw.regs = &icl_aux_power_well_regs, 2890 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 2891 }, 2892 }, 2893 { 2894 .name = "AUX B", 2895 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 2896 .ops = &icl_combo_phy_aux_power_well_ops, 2897 .id = DISP_PW_ID_NONE, 2898 { 2899 .hsw.regs = &icl_aux_power_well_regs, 2900 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 2901 }, 2902 }, 2903 { 2904 .name = "AUX C", 2905 .domains = ICL_AUX_C_IO_POWER_DOMAINS, 2906 .ops = &icl_tc_phy_aux_power_well_ops, 2907 .id = DISP_PW_ID_NONE, 2908 { 2909 .hsw.regs = &icl_aux_power_well_regs, 2910 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 2911 .hsw.is_tc_tbt = false, 2912 }, 2913 }, 2914 { 2915 .name = "AUX D", 2916 .domains = ICL_AUX_D_IO_POWER_DOMAINS, 2917 .ops = &icl_tc_phy_aux_power_well_ops, 2918 .id = 
DISP_PW_ID_NONE, 2919 { 2920 .hsw.regs = &icl_aux_power_well_regs, 2921 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 2922 .hsw.is_tc_tbt = false, 2923 }, 2924 }, 2925 { 2926 .name = "AUX E", 2927 .domains = ICL_AUX_E_IO_POWER_DOMAINS, 2928 .ops = &icl_tc_phy_aux_power_well_ops, 2929 .id = DISP_PW_ID_NONE, 2930 { 2931 .hsw.regs = &icl_aux_power_well_regs, 2932 .hsw.idx = ICL_PW_CTL_IDX_AUX_E, 2933 .hsw.is_tc_tbt = false, 2934 }, 2935 }, 2936 { 2937 .name = "AUX F", 2938 .domains = ICL_AUX_F_IO_POWER_DOMAINS, 2939 .ops = &icl_tc_phy_aux_power_well_ops, 2940 .id = DISP_PW_ID_NONE, 2941 { 2942 .hsw.regs = &icl_aux_power_well_regs, 2943 .hsw.idx = ICL_PW_CTL_IDX_AUX_F, 2944 .hsw.is_tc_tbt = false, 2945 }, 2946 }, 2947 { 2948 .name = "AUX TBT1", 2949 .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, 2950 .ops = &icl_tc_phy_aux_power_well_ops, 2951 .id = DISP_PW_ID_NONE, 2952 { 2953 .hsw.regs = &icl_aux_power_well_regs, 2954 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, 2955 .hsw.is_tc_tbt = true, 2956 }, 2957 }, 2958 { 2959 .name = "AUX TBT2", 2960 .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, 2961 .ops = &icl_tc_phy_aux_power_well_ops, 2962 .id = DISP_PW_ID_NONE, 2963 { 2964 .hsw.regs = &icl_aux_power_well_regs, 2965 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, 2966 .hsw.is_tc_tbt = true, 2967 }, 2968 }, 2969 { 2970 .name = "AUX TBT3", 2971 .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, 2972 .ops = &icl_tc_phy_aux_power_well_ops, 2973 .id = DISP_PW_ID_NONE, 2974 { 2975 .hsw.regs = &icl_aux_power_well_regs, 2976 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, 2977 .hsw.is_tc_tbt = true, 2978 }, 2979 }, 2980 { 2981 .name = "AUX TBT4", 2982 .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, 2983 .ops = &icl_tc_phy_aux_power_well_ops, 2984 .id = DISP_PW_ID_NONE, 2985 { 2986 .hsw.regs = &icl_aux_power_well_regs, 2987 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, 2988 .hsw.is_tc_tbt = true, 2989 }, 2990 }, 2991 { 2992 .name = "power well 4", 2993 .domains = ICL_PW_4_POWER_DOMAINS, 2994 .ops = &hsw_power_well_ops, 2995 .id = DISP_PW_ID_NONE, 2996 { 2997 
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};

/*
 * Normalize the disable_power_well module option: an explicit value (>= 0)
 * is clamped to 0/1, while -1 (auto) resolves to 1 (power wells may be
 * disabled). @dev_priv is currently unused but kept for per-platform
 * overrides.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

/*
 * Compute the mask of DC states the driver is allowed to enter, based on
 * the platform's maximum supported DC state and the enable_dc module
 * option (-1 = auto, 0..2 = cap at that state; out-of-range values are
 * clamped with a debug/error message).
 */
static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
				    int enable_dc)
{
	uint32_t mask;
	int requested_dc;
	int max_dc;

	if (INTEL_GEN(dev_priv) >= 11) {
		max_dc = 2;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_GEN9_LP(dev_priv)) {
		max_dc = 1;
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	/* DC states require power wells to be disableable. */
	if (!i915_modparams.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 2) {
		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
			      enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	if (requested_dc > 1)
		mask |= DC_STATE_EN_UPTO_DC6;
	if (requested_dc > 0)
		mask |= DC_STATE_EN_UPTO_DC5;

	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);

	return mask;
}

/*
 * Allocate and populate the runtime power well array from a const
 * descriptor table. Duplicate or out-of-range non-NONE well ids in the
 * table trip the WARN_ONs. Returns 0 or -ENOMEM.
 */
static int
__set_power_wells(struct i915_power_domains *power_domains,
		  const struct i915_power_well_desc *power_well_descs,
		  int power_well_count)
{
	u64 power_well_ids = 0;
	int i;

	power_domains->power_well_count = power_well_count;
	power_domains->power_wells =
				kcalloc(power_well_count,
					sizeof(*power_domains->power_wells),
					GFP_KERNEL);
	if (!power_domains->power_wells)
		return -ENOMEM;

	for (i = 0; i < power_well_count; i++) {
		enum i915_power_well_id id = power_well_descs[i].id;

		power_domains->power_wells[i].desc = &power_well_descs[i];

		if (id == DISP_PW_ID_NONE)
			continue;

		/* Each non-NONE id must fit in, and be unique within, a u64. */
		WARN_ON(id >= sizeof(power_well_ids) * 8);
		WARN_ON(power_well_ids & BIT_ULL(id));
		power_well_ids |= BIT_ULL(id);
	}

	return 0;
}

#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 *
 * Return: 0 on success, -ENOMEM if the power well array allocation fails.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int err;

	i915_modparams.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   i915_modparams.disable_power_well);
	dev_priv->csr.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);

	/* The u64 domain masks above can only hold 64 domains. */
	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_ICELAKE(dev_priv)) {
		err = set_power_wells(power_domains, icl_power_wells);
	} else if (IS_CANNONLAKE(dev_priv)) {
		err = set_power_wells(power_domains, cnl_power_wells);

		/*
		 * DDI and Aux IO are getting enabled for all ports
		 * regardless the presence or use. So, in order to avoid
		 * timeouts, lets remove them from the list
		 * for the SKUs without port F.
		 * (Relies on "DDI F IO" and "AUX F" being the LAST two
		 * entries of cnl_power_wells.)
		 */
		if (!IS_CNL_WITH_PORT_F(dev_priv))
			power_domains->power_well_count -= 2;
	} else if (IS_GEMINILAKE(dev_priv)) {
		err = set_power_wells(power_domains, glk_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		err = set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_GEN9_BC(dev_priv)) {
		err = set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, chv_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		err = set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_HASWELL(dev_priv)) {
		err = set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		err = set_power_wells(power_domains, i830_power_wells);
	} else {
		/* Everything else: single always-on well only. */
		err = set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return err;
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	kfree(dev_priv->power_domains.power_wells);
}

/*
 * NOTE(review): body continues past this chunk — presumably iterates the
 * wells calling their ->sync_hw() hooks; confirm against the full file.
 */
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
mutex_lock(&power_domains->lock); 3182 for_each_power_well(dev_priv, power_well) { 3183 power_well->desc->ops->sync_hw(dev_priv, power_well); 3184 power_well->hw_enabled = 3185 power_well->desc->ops->is_enabled(dev_priv, power_well); 3186 } 3187 mutex_unlock(&power_domains->lock); 3188 } 3189 3190 static inline 3191 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv, 3192 i915_reg_t reg, bool enable) 3193 { 3194 u32 val, status; 3195 3196 val = I915_READ(reg); 3197 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST); 3198 I915_WRITE(reg, val); 3199 POSTING_READ(reg); 3200 udelay(10); 3201 3202 status = I915_READ(reg) & DBUF_POWER_STATE; 3203 if ((enable && !status) || (!enable && status)) { 3204 DRM_ERROR("DBus power %s timeout!\n", 3205 enable ? "enable" : "disable"); 3206 return false; 3207 } 3208 return true; 3209 } 3210 3211 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) 3212 { 3213 intel_dbuf_slice_set(dev_priv, DBUF_CTL, true); 3214 } 3215 3216 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) 3217 { 3218 intel_dbuf_slice_set(dev_priv, DBUF_CTL, false); 3219 } 3220 3221 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv) 3222 { 3223 if (INTEL_GEN(dev_priv) < 11) 3224 return 1; 3225 return 2; 3226 } 3227 3228 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, 3229 u8 req_slices) 3230 { 3231 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; 3232 u32 val; 3233 bool ret; 3234 3235 if (req_slices > intel_dbuf_max_slices(dev_priv)) { 3236 DRM_ERROR("Invalid number of dbuf slices requested\n"); 3237 return; 3238 } 3239 3240 if (req_slices == hw_enabled_slices || req_slices == 0) 3241 return; 3242 3243 val = I915_READ(DBUF_CTL_S2); 3244 if (req_slices > hw_enabled_slices) 3245 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); 3246 else 3247 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false); 3248 3249 if (ret) 3250 dev_priv->wm.skl_hw.ddb.enabled_slices = 
req_slices; 3251 } 3252 3253 static void icl_dbuf_enable(struct drm_i915_private *dev_priv) 3254 { 3255 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST); 3256 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST); 3257 POSTING_READ(DBUF_CTL_S2); 3258 3259 udelay(10); 3260 3261 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || 3262 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) 3263 DRM_ERROR("DBuf power enable timeout\n"); 3264 else 3265 dev_priv->wm.skl_hw.ddb.enabled_slices = 2; 3266 } 3267 3268 static void icl_dbuf_disable(struct drm_i915_private *dev_priv) 3269 { 3270 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST); 3271 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST); 3272 POSTING_READ(DBUF_CTL_S2); 3273 3274 udelay(10); 3275 3276 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || 3277 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) 3278 DRM_ERROR("DBuf power disable timeout!\n"); 3279 else 3280 dev_priv->wm.skl_hw.ddb.enabled_slices = 0; 3281 } 3282 3283 static void icl_mbus_init(struct drm_i915_private *dev_priv) 3284 { 3285 uint32_t val; 3286 3287 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 3288 MBUS_ABOX_BT_CREDIT_POOL2(16) | 3289 MBUS_ABOX_B_CREDIT(1) | 3290 MBUS_ABOX_BW_CREDIT(1); 3291 3292 I915_WRITE(MBUS_ABOX_CTL, val); 3293 } 3294 3295 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, 3296 bool enable) 3297 { 3298 i915_reg_t reg; 3299 u32 reset_bits, val; 3300 3301 if (IS_IVYBRIDGE(dev_priv)) { 3302 reg = GEN7_MSG_CTL; 3303 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; 3304 } else { 3305 reg = HSW_NDE_RSTWRN_OPT; 3306 reset_bits = RESET_PCH_HANDSHAKE_ENABLE; 3307 } 3308 3309 val = I915_READ(reg); 3310 3311 if (enable) 3312 val |= reset_bits; 3313 else 3314 val &= ~reset_bits; 3315 3316 I915_WRITE(reg, val); 3317 } 3318 3319 static void skl_display_core_init(struct drm_i915_private *dev_priv, 3320 bool resume) 3321 { 3322 struct 
i915_power_domains *power_domains = &dev_priv->power_domains; 3323 struct i915_power_well *well; 3324 3325 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 3326 3327 /* enable PCH reset handshake */ 3328 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 3329 3330 /* enable PG1 and Misc I/O */ 3331 mutex_lock(&power_domains->lock); 3332 3333 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 3334 intel_power_well_enable(dev_priv, well); 3335 3336 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); 3337 intel_power_well_enable(dev_priv, well); 3338 3339 mutex_unlock(&power_domains->lock); 3340 3341 skl_init_cdclk(dev_priv); 3342 3343 gen9_dbuf_enable(dev_priv); 3344 3345 if (resume && dev_priv->csr.dmc_payload) 3346 intel_csr_load_program(dev_priv); 3347 } 3348 3349 static void skl_display_core_uninit(struct drm_i915_private *dev_priv) 3350 { 3351 struct i915_power_domains *power_domains = &dev_priv->power_domains; 3352 struct i915_power_well *well; 3353 3354 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 3355 3356 gen9_dbuf_disable(dev_priv); 3357 3358 skl_uninit_cdclk(dev_priv); 3359 3360 /* The spec doesn't call for removing the reset handshake flag */ 3361 /* disable PG1 and Misc I/O */ 3362 3363 mutex_lock(&power_domains->lock); 3364 3365 /* 3366 * BSpec says to keep the MISC IO power well enabled here, only 3367 * remove our request for power well 1. 3368 * Note that even though the driver's request is removed power well 1 3369 * may stay enabled after this due to DMC's own request on it. 
3370 */ 3371 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 3372 intel_power_well_disable(dev_priv, well); 3373 3374 mutex_unlock(&power_domains->lock); 3375 3376 usleep_range(10, 30); /* 10 us delay per Bspec */ 3377 } 3378 3379 void bxt_display_core_init(struct drm_i915_private *dev_priv, 3380 bool resume) 3381 { 3382 struct i915_power_domains *power_domains = &dev_priv->power_domains; 3383 struct i915_power_well *well; 3384 3385 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 3386 3387 /* 3388 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 3389 * or else the reset will hang because there is no PCH to respond. 3390 * Move the handshake programming to initialization sequence. 3391 * Previously was left up to BIOS. 3392 */ 3393 intel_pch_reset_handshake(dev_priv, false); 3394 3395 /* Enable PG1 */ 3396 mutex_lock(&power_domains->lock); 3397 3398 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 3399 intel_power_well_enable(dev_priv, well); 3400 3401 mutex_unlock(&power_domains->lock); 3402 3403 bxt_init_cdclk(dev_priv); 3404 3405 gen9_dbuf_enable(dev_priv); 3406 3407 if (resume && dev_priv->csr.dmc_payload) 3408 intel_csr_load_program(dev_priv); 3409 } 3410 3411 void bxt_display_core_uninit(struct drm_i915_private *dev_priv) 3412 { 3413 struct i915_power_domains *power_domains = &dev_priv->power_domains; 3414 struct i915_power_well *well; 3415 3416 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 3417 3418 gen9_dbuf_disable(dev_priv); 3419 3420 bxt_uninit_cdclk(dev_priv); 3421 3422 /* The spec doesn't call for removing the reset handshake flag */ 3423 3424 /* 3425 * Disable PW1 (PG1). 3426 * Note that even though the driver's request is removed power well 1 3427 * may stay enabled after this due to DMC's own request on it. 
3428 */ 3429 mutex_lock(&power_domains->lock); 3430 3431 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 3432 intel_power_well_disable(dev_priv, well); 3433 3434 mutex_unlock(&power_domains->lock); 3435 3436 usleep_range(10, 30); /* 10 us delay per Bspec */ 3437 } 3438 3439 enum { 3440 PROCMON_0_85V_DOT_0, 3441 PROCMON_0_95V_DOT_0, 3442 PROCMON_0_95V_DOT_1, 3443 PROCMON_1_05V_DOT_0, 3444 PROCMON_1_05V_DOT_1, 3445 }; 3446 3447 static const struct cnl_procmon { 3448 u32 dw1, dw9, dw10; 3449 } cnl_procmon_values[] = { 3450 [PROCMON_0_85V_DOT_0] = 3451 { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, }, 3452 [PROCMON_0_95V_DOT_0] = 3453 { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, }, 3454 [PROCMON_0_95V_DOT_1] = 3455 { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, }, 3456 [PROCMON_1_05V_DOT_0] = 3457 { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, }, 3458 [PROCMON_1_05V_DOT_1] = 3459 { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, }, 3460 }; 3461 3462 /* 3463 * CNL has just one set of registers, while ICL has two sets: one for port A and 3464 * the other for port B. The CNL registers are equivalent to the ICL port A 3465 * registers, that's why we call the ICL macros even though the function has CNL 3466 * on its name. 
 */
static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
				       enum port port)
{
	const struct cnl_procmon *procmon;
	u32 val;

	/* Pick the compensation values for the fused voltage/process corner. */
	val = I915_READ(ICL_PORT_COMP_DW3(port));
	switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
	default:
		MISSING_CASE(val);
		/* fall through */
	case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
		procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
		break;
	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
		break;
	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
		break;
	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
		break;
	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
		break;
	}

	val = I915_READ(ICL_PORT_COMP_DW1(port));
	val &= ~((0xff << 16) | 0xff);
	val |= procmon->dw1;
	I915_WRITE(ICL_PORT_COMP_DW1(port), val);

	I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
	I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
}

/* CNL display core init sequence per Bspec (numbered steps below). */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2. Enable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val &= ~CNL_COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);

	/* Dummy PORT_A to get the correct CNL register from the ICL macro */
	cnl_set_procmon_ref_values(dev_priv, PORT_A);

	val = I915_READ(CNL_PORT_COMP_DW0);
	val |= COMP_INIT;
	I915_WRITE(CNL_PORT_COMP_DW0, val);

	/* 3. */
	val = I915_READ(CNL_PORT_CL1CM_DW5);
	val |= CL_POWER_DOWN_ENABLE;
	I915_WRITE(CNL_PORT_CL1CM_DW5, val);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 * The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	cnl_init_cdclk(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

/* CNL display core uninit sequence (reverse of init). */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	cnl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 * The AUX IO power wells are toggled on demand, so they are already
	 * disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */

	/* 5. Disable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val |= CNL_COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);
}

/* ICL display core init sequence per Bspec (numbered steps below). */
void icl_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	enum port port;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* Combo PHYs live on ports A and B only. */
	for (port = PORT_A; port <= PORT_B; port++) {
		/* 2. Enable DDI combo PHY comp. */
		val = I915_READ(ICL_PHY_MISC(port));
		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
		I915_WRITE(ICL_PHY_MISC(port), val);

		cnl_set_procmon_ref_values(dev_priv, port);

		val = I915_READ(ICL_PORT_COMP_DW0(port));
		val |= COMP_INIT;
		I915_WRITE(ICL_PORT_COMP_DW0(port), val);

		/* 3. Set power down enable. */
		val = I915_READ(ICL_PORT_CL_DW5(port));
		val |= CL_POWER_DOWN_ENABLE;
		I915_WRITE(ICL_PORT_CL_DW5(port), val);
	}

	/*
	 * 4. Enable Power Well 1 (PG1).
	 * The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CDCLK. */
	icl_init_cdclk(dev_priv);

	/* 6. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 7. Setup MBUS. */
	icl_mbus_init(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

/* ICL display core uninit sequence (reverse of init). */
void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	enum port port;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	icl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 * The AUX IO power wells are toggled on demand, so they are already
	 * disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Disable Comp */
	for (port = PORT_A; port <= PORT_B; port++) {
		val = I915_READ(ICL_PHY_MISC(port));
		val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
		I915_WRITE(ICL_PHY_MISC(port), val);
	}
}

/*
 * Reconstruct the shadow copy of DISPLAY_PHY_CONTROL from the current power
 * well and lane status, since the register itself cannot be read safely.
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}

/*
 * VLV common-lane workaround: force a display PHY side reset by toggling the
 * common lane power well, unless the display is already known to be active.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_fini_hw().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_ICELAKE(dev_priv)) {
		icl_display_core_init(dev_priv, resume);
	} else if (IS_CANNONLAKE(dev_priv)) {
		cnl_display_core_init(dev_priv, resume);
	} else if (IS_GEN9_BC(dev_priv)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
		/*
		 * NOTE(review): IVB is gen7, so IS_IVYBRIDGE looks redundant
		 * with INTEL_GEN >= 7 — confirm against the platform macros.
		 */
		intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	/* Disable power support if the user asked so. */
	if (!i915_modparams.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);

	power_domains->initializing = false;
}

/**
 * intel_power_domains_fini_hw - deinitialize hw power domain state
 * @dev_priv: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
{
	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(dev_priv);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915_modparams.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(dev_priv);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @dev_priv: i915 device instance
 *
 * Enable the ondemand enabling/disabling of the display power wells. Note that
 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
 * only at specific points of the display modeset sequence, thus they are not
 * affected by the intel_power_domains_enable()/disable() calls. The purpose
 * of these function is to keep the rest of power wells enabled until the end
 * of display HW readout (which will acquire the power references reflecting
 * the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *dev_priv)
{
	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(dev_priv);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @dev_priv: i915 device instance
 *
 * Disable the ondemand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *dev_priv)
{
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(dev_priv);
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * CSR/DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    dev_priv->csr.dmc_payload != NULL) {
		intel_power_domains_verify_state(dev_priv);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915_modparams.disable_power_well) {
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
		intel_power_domains_verify_state(dev_priv);
	}

	if (IS_ICELAKE(dev_priv))
		icl_display_core_uninit(dev_priv);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_display_core_uninit(dev_priv);
	else if (IS_GEN9_BC(dev_priv))
		skl_display_core_uninit(dev_priv);
	else if (IS_GEN9_LP(dev_priv))
		bxt_display_core_uninit(dev_priv);

	power_domains->display_core_suspended = true;
}

/**
 * intel_power_domains_resume - resume power domain state
 * @dev_priv: i915 device instance
 *
 * This function resume the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(dev_priv, true);
		power_domains->display_core_suspended = false;
	} else {
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(dev_priv);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

/* Dump every power well's refcount and per-domain use counts (debug only). */
static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well) {
		enum intel_display_power_domain domain;

		DRM_DEBUG_DRIVER("%-25s %d\n",
				 power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			DRM_DEBUG_DRIVER(" %-23s %d\n",
					 intel_display_power_domain_str(domain),
					 power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @dev_priv: i915 device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	dump_domain_info = false;
	for_each_power_well(dev_priv, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		/*
		 * Power wells not belonging to any domain (like the MISC_IO
		 * and PW1 power wells) are under FW control, so ignore them,
		 * since their state can change asynchronously.
		 */
		if (!power_well->desc->domains)
			continue;

		enabled = power_well->desc->ops->is_enabled(dev_priv,
							    power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
				  power_well->desc->name,
				  power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			DRM_ERROR("power well %s refcount/domain refcount mismatch "
				  "(refcount %d/domains refcount %d)\n",
				  power_well->desc->name, power_well->count,
				  domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		/* Dump the full state only once per boot to limit log spam. */
		if (!dumped) {
			intel_power_domains_dump_info(dev_priv);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
{
}

#endif

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
4082 */ 4083 void intel_runtime_pm_get(struct drm_i915_private *dev_priv) 4084 { 4085 struct pci_dev *pdev = dev_priv->drm.pdev; 4086 struct device *kdev = &pdev->dev; 4087 int ret; 4088 4089 ret = pm_runtime_get_sync(kdev); 4090 WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret); 4091 4092 atomic_inc(&dev_priv->runtime_pm.wakeref_count); 4093 assert_rpm_wakelock_held(dev_priv); 4094 } 4095 4096 /** 4097 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use 4098 * @dev_priv: i915 device instance 4099 * 4100 * This function grabs a device-level runtime pm reference if the device is 4101 * already in use and ensures that it is powered up. It is illegal to try 4102 * and access the HW should intel_runtime_pm_get_if_in_use() report failure. 4103 * 4104 * Any runtime pm reference obtained by this function must have a symmetric 4105 * call to intel_runtime_pm_put() to release the reference again. 4106 * 4107 * Returns: True if the wakeref was acquired, or False otherwise. 4108 */ 4109 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) 4110 { 4111 if (IS_ENABLED(CONFIG_PM)) { 4112 struct pci_dev *pdev = dev_priv->drm.pdev; 4113 struct device *kdev = &pdev->dev; 4114 4115 /* 4116 * In cases runtime PM is disabled by the RPM core and we get 4117 * an -EINVAL return value we are not supposed to call this 4118 * function, since the power state is undefined. This applies 4119 * atm to the late/early system suspend/resume handlers. 4120 */ 4121 if (pm_runtime_get_if_in_use(kdev) <= 0) 4122 return false; 4123 } 4124 4125 atomic_inc(&dev_priv->runtime_pm.wakeref_count); 4126 assert_rpm_wakelock_held(dev_priv); 4127 4128 return true; 4129 } 4130 4131 /** 4132 * intel_runtime_pm_get_noresume - grab a runtime pm reference 4133 * @dev_priv: i915 device instance 4134 * 4135 * This function grabs a device-level runtime pm reference (mostly used for GEM 4136 * code to ensure the GTT or GT is on). 
4137 * 4138 * It will _not_ power up the device but instead only check that it's powered 4139 * on. Therefore it is only valid to call this functions from contexts where 4140 * the device is known to be powered up and where trying to power it up would 4141 * result in hilarity and deadlocks. That pretty much means only the system 4142 * suspend/resume code where this is used to grab runtime pm references for 4143 * delayed setup down in work items. 4144 * 4145 * Any runtime pm reference obtained by this function must have a symmetric 4146 * call to intel_runtime_pm_put() to release the reference again. 4147 */ 4148 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) 4149 { 4150 struct pci_dev *pdev = dev_priv->drm.pdev; 4151 struct device *kdev = &pdev->dev; 4152 4153 assert_rpm_wakelock_held(dev_priv); 4154 pm_runtime_get_noresume(kdev); 4155 4156 atomic_inc(&dev_priv->runtime_pm.wakeref_count); 4157 } 4158 4159 /** 4160 * intel_runtime_pm_put - release a runtime pm reference 4161 * @dev_priv: i915 device instance 4162 * 4163 * This function drops the device-level runtime pm reference obtained by 4164 * intel_runtime_pm_get() and might power down the corresponding 4165 * hardware block right away if this is the last reference. 4166 */ 4167 void intel_runtime_pm_put(struct drm_i915_private *dev_priv) 4168 { 4169 struct pci_dev *pdev = dev_priv->drm.pdev; 4170 struct device *kdev = &pdev->dev; 4171 4172 assert_rpm_wakelock_held(dev_priv); 4173 atomic_dec(&dev_priv->runtime_pm.wakeref_count); 4174 4175 pm_runtime_mark_last_busy(kdev); 4176 pm_runtime_put_autosuspend(kdev); 4177 } 4178 4179 /** 4180 * intel_runtime_pm_enable - enable runtime pm 4181 * @dev_priv: i915 device instance 4182 * 4183 * This function enables runtime pm at the end of the driver load sequence. 4184 * 4185 * Note that this function does currently not enable runtime pm for the 4186 * subordinate display power domains. That is done by 4187 * intel_power_domains_enable(). 
4188 */ 4189 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) 4190 { 4191 struct pci_dev *pdev = dev_priv->drm.pdev; 4192 struct device *kdev = &pdev->dev; 4193 4194 /* 4195 * Disable the system suspend direct complete optimization, which can 4196 * leave the device suspended skipping the driver's suspend handlers 4197 * if the device was already runtime suspended. This is needed due to 4198 * the difference in our runtime and system suspend sequence and 4199 * becaue the HDA driver may require us to enable the audio power 4200 * domain during system suspend. 4201 */ 4202 dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP); 4203 4204 pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */ 4205 pm_runtime_mark_last_busy(kdev); 4206 4207 /* 4208 * Take a permanent reference to disable the RPM functionality and drop 4209 * it only when unloading the driver. Use the low level get/put helpers, 4210 * so the driver's own RPM reference tracking asserts also work on 4211 * platforms without RPM support. 4212 */ 4213 if (!HAS_RUNTIME_PM(dev_priv)) { 4214 int ret; 4215 4216 pm_runtime_dont_use_autosuspend(kdev); 4217 ret = pm_runtime_get_sync(kdev); 4218 WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret); 4219 } else { 4220 pm_runtime_use_autosuspend(kdev); 4221 } 4222 4223 /* 4224 * The core calls the driver load handler with an RPM reference held. 4225 * We drop that here and will reacquire it during unloading in 4226 * intel_power_domains_fini(). 
4227 */ 4228 pm_runtime_put_autosuspend(kdev); 4229 } 4230 4231 void intel_runtime_pm_disable(struct drm_i915_private *dev_priv) 4232 { 4233 struct pci_dev *pdev = dev_priv->drm.pdev; 4234 struct device *kdev = &pdev->dev; 4235 4236 /* Transfer rpm ownership back to core */ 4237 WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0, 4238 "Failed to pass rpm ownership back to core\n"); 4239 4240 pm_runtime_dont_use_autosuspend(kdev); 4241 4242 if (!HAS_RUNTIME_PM(dev_priv)) 4243 pm_runtime_put(kdev); 4244 } 4245