/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
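/*
 * Illustrative use of the iteration helpers below (a hedged sketch, not
 * part of the original file): grabbing every power well that backs a
 * given domain, in wakeup order:
 *
 *	for_each_power_well(i, power_well, BIT(domain), power_domains)
 *		intel_power_well_get(dev_priv, power_well);
 *
 * Releases walk the array in reverse with for_each_power_well_rev() so
 * that outer/shared wells are dropped last.
 */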
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		for_each_if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			   \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);  \
	     i--)							   \
		for_each_if ((power_well)->domains & (domain_mask))

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id);

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}
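/*
 * Illustrative pairing of the low level helpers above (hedged sketch,
 * not part of the original file):
 *
 *	intel_power_well_get(dev_priv, power_well);
 *	... poke registers backed by this well ...
 *	intel_power_well_put(dev_priv, power_well);
 *
 * The first get enables the well, the last put disables it again.
 */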
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
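/*
 * Typical readout-time use of the check above (illustrative sketch, not
 * from the original file; assumes the relevant modeset locks are held):
 *
 *	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		... read out pipe A hardware state ...
 *
 * Everywhere outside state readout, intel_display_power_get()/put()
 * should be used instead of checking.
 */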
/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
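/*
 * Note (added summary, inferred from the registers used elsewhere in
 * this file): the four request sources mentioned above are the BIOS,
 * the driver, KVMr and debug, reachable through HSW_PWR_WELL_BIOS,
 * HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_KVMR and HSW_PWR_WELL_DEBUG. The
 * well stays powered while any of them has its request bit set.
 */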
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}

static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	if (power_well->data == SKL_DISP_PW_2)
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (intel_wait_for_register(dev_priv,
						    HSW_PWR_WELL_DRIVER,
						    HSW_PWR_WELL_STATE_ENABLED,
						    HSW_PWR_WELL_STATE_ENABLED,
						    20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			hsw_power_well_pre_disable(dev_priv);
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
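/*
 * Note (added, hedged): each *_POWER_DOMAINS mask becomes the ->domains
 * field of a power well declared in the tables at the bottom of this
 * file. E.g. since BIT(POWER_DOMAIN_VGA) is only part of the power well
 * 2 mask on SKL, an intel_display_power_get(dev_priv, POWER_DOMAIN_VGA)
 * call ends up pulling power well 2 up.
 */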
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 *    set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
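/*
 * Added overview (hedged, pieced together from the asserts in this
 * file): DC5/DC6 are display C-states that the DMC firmware enters once
 * the driver allows them via DC_STATE_EN, so both require the CSR/DMC
 * firmware to be loaded. DC9 is a deeper state used only on BXT (see
 * gen9_dc_mask()) and additionally requires interrupts and the power
 * wells to be off.
 */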
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time we need only one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
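/*
 * Note (added, hedged): the preconditions enforced by the
 * assert_can_enable_dc5/dc6 helpers differ per state: DC5 requires
 * power well 2 to be off, DC6 additionally requires the backlight
 * utility pin to be disabled; both require the DMC firmware, since it
 * is the DMC that actually enters and exits the state.
 */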
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	     power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}
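/*
 * Note (added, hedged): on SKL each power well owns a request/state bit
 * pair in every PWR_WELL_CTL-style register, so SKL_POWER_WELL_REQ(id)
 * and SKL_POWER_WELL_STATE(id) select the two bits for a given well id.
 * skl_set_power_well() below drives the driver's request bit and then
 * polls the state bit.
 */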
static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (intel_wait_for_register(dev_priv,
					    SKL_FUSE_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (!enable && enable_requested)
		skl_power_well_pre_disable(dev_priv, power_well);

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
			     !I915_READ(HSW_PWR_WELL_BIOS),
			     "Invalid for power well status to be enabled, unless done by the BIOS, when request is to disable!\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}

		if (IS_GEN9(dev_priv))
			gen9_sanitize_power_well_requests(dev_priv, power_well);
	}

	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
		     1))
		DRM_ERROR("%s %s timeout\n",
			  power_well->name, enable ? "enable" : "disable");

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;

	return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
}
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;
	struct i915_power_well *cmn_a_well = NULL;

	if (power_well_id == BXT_DPIO_CMN_BC) {
		/*
		 * We need to copy the GRC calibration value from the eDP PHY,
		 * so make sure it's powered up.
		 */
		cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
		intel_power_well_get(dev_priv, cmn_a_well);
	}

	bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));

	if (cmn_a_well)
		intel_power_well_put(dev_priv, cmn_a_well);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv,
				      bxt_power_well_to_phy(power_well));
}

static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
	else
		bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
}


static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv,
					 bxt_power_well_to_phy(power_well));

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv,
					 bxt_power_well_to_phy(power_well));
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power state (0x%08x)\n", tmp);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	WARN_ON(dev_priv->cdclk_freq !=
		dev_priv->display.get_display_clock_speed(&dev_priv->drm));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}

static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		gen9_dc_off_power_well_enable(dev_priv, power_well);
	else
		gen9_dc_off_power_well_disable(dev_priv, power_well);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
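/*
 * Note (added, hedged): VLV/CHV display power wells are owned by the
 * Punit, so vlv_set_power_well() below doesn't bang display registers
 * directly; it does a read-modify-write of PUNIT_REG_PWRGT_CTRL over
 * the sideband bus and then polls PUNIT_REG_PWRGT_STATUS until the
 * requested state sticks.
 */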
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
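/*
 * Note (added summary): everything behind the VLV display power well is
 * lost when it goes down, so vlv_display_power_well_init() below has to
 * redo clock gating, re-enable the CRI clock and display irqs, and
 * (outside of driver init/resume) re-init hotplug, the CRT ADPA and VGA
 * state.
 */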
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(&dev_priv->drm, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be re-initialized explicitly
	 * anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(&dev_priv->drm);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;

		power_well = &power_domains->power_wells[i];
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
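/*
 * Example use of lookup_power_well() (illustrative only, not from the
 * original file):
 *
 *	struct i915_power_well *cmn_bc =
 *		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
 *
 * The id is matched against the platform specific ->data cookie of the
 * wells registered at the bottom of this file.
 */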
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}
#undef BITS_SET
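/*
 * Note (added, hedged): powering a CHV DPIO PHY up is a sequence, not a
 * single register write: enable the Punit well, poll
 * DISPLAY_PHY_STATUS for PHY_POWERGOOD, program dynamic power down, and
 * finally deassert cmn_reset via DISPLAY_PHY_CONTROL.
 * chv_dpio_cmn_power_well_enable() below implements this order.
 */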
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
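/*
 * Illustrative call of the override helper above (hedged sketch; the
 * values are made up): an encoder can force the power down override for
 * a whole channel and restore the previous setting afterwards:
 *
 *	bool was = chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH0, true);
 *	...
 *	chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH0, was);
 */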
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(domain), power_domains)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}
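/*
 * Typical usage of the domain API (illustrative, not from the original
 * file):
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_B);
 *	... touch hardware in the AUX B domain ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_B);
 *
 * When the access must not wake the hardware, the conditional variant
 * below can be used instead:
 *
 *	if (intel_display_power_get_if_enabled(dev_priv, domain)) {
 *		... read registers ...
 *		intel_display_power_put(dev_priv, domain);
 *	}
 */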
/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain, but only if the
 * domain is already enabled, and ensures that the power domain and all its
 * parents are powered up. Therefore users should only grab a reference to the
 * innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns:
 * True when the reference was grabbed, false otherwise.
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool is_enabled;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled)
		intel_runtime_pm_put(dev_priv);

	return is_enabled;
}

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to put the reference for
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
		intel_power_well_put(dev_priv, power_well);

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}

#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_INIT))
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to drop the reference for
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
		intel_power_well_put(dev_priv, power_well);

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}

#define HSW_DISPLAY_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_PIPE_C) | \
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_TRANSCODER_C) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_PIPE_C) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_TRANSCODER_C) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PIPE_A) | \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_PORT_DSI) | \
	BIT(POWER_DOMAIN_PORT_CRT) | \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_GMBUS) | \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_PORT_CRT) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PIPE_A) | \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_PIPE_C) | \
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_TRANSCODER_C) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT(POWER_DOMAIN_PORT_DSI) | \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_AUX_D) | \
	BIT(POWER_DOMAIN_GMBUS) | \
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT(POWER_DOMAIN_AUX_D) | \
	BIT(POWER_DOMAIN_INIT))
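/*
 * Note (a sketch, assuming nothing beyond what the code states): the masks
 * above are bitwise ORs of BIT(domain) values, so deciding whether a well
 * backs a given domain is a single AND, which is exactly the selection
 * __intel_display_power_get_domain() performs via BIT(domain):
 *
 *	bool backs_domain = (power_well->domains & BIT(domain)) != 0;
 */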
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = bxt_dpio_cmn_power_well_sync_hw,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.data = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}
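/*
 * Illustrative sketch only: the helper above is a thin wrapper, equivalent
 * to doing the lookup and ->is_enabled() query by hand, e.g. for power
 * well 2:
 *
 *	struct i915_power_well *well =
 *		lookup_power_well(dev_priv, SKL_DISP_PW_2);
 *	bool enabled = well->ops->is_enabled(dev_priv, well);
 */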
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.data = SKL_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};

static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.data = BXT_DPIO_CMN_A,
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.data = BXT_DPIO_CMN_BC,
	},
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
				    int enable_dc)
{
	uint32_t mask;
	int requested_dc;
	int max_dc;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_BROXTON(dev_priv)) {
		max_dc = 1;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	if (!i915.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 2) {
		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
			      enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	if (requested_dc > 1)
		mask |= DC_STATE_EN_UPTO_DC6;
	if (requested_dc > 0)
		mask |= DC_STATE_EN_UPTO_DC5;

	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);

	return mask;
}
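/*
 * Worked example (not driver code): on SKL/KBL with the default
 * i915.enable_dc=-1 and power well support left enabled, max_dc is 2, so
 * the returned mask is DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6. On BXT
 * with i915.enable_dc=0 the result is just DC_STATE_EN_DC9, since DC9 is
 * allowed unconditionally there.
 */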
#define set_power_wells(power_domains, __power_wells) ({ \
	(power_domains)->power_wells = (__power_wells); \
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);
	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
							    i915.enable_dc);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
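/*
 * Sketch of how a hypothetical platform would slot in (IS_FOO and
 * foo_power_wells are made up for illustration): define the well table
 * with its domain masks and ops as above, then extend the chain:
 *
 *	} else if (IS_FOO(dev_priv)) {
 *		set_power_wells(power_domains, foo_power_wells);
 *	}
 */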
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	struct device *kdev = &dev_priv->drm.pdev->dev;

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(kdev);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout!\n");
}
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}

void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* Disable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display is potentially already active, skip this. */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: true if called from a system resume path
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct drm_device *dev = &dev_priv->drm;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_BROXTON(dev)) {
		bxt_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/* Disable power well support if the user asked for it. */
	if (!i915.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 *
 * This function prepares the hardware power domain state before entering
 * system suspend. It must be paired with intel_power_domains_init_hw().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skl_display_core_uninit(dev_priv);
	else if (IS_BROXTON(dev_priv))
		bxt_display_core_uninit(dev_priv);
}
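/*
 * Illustrative pairing (sketch only): system suspend/resume paths are
 * expected to call these two symmetrically:
 *
 *	intel_power_domains_suspend(dev_priv);
 *	... system enters and leaves suspend ...
 *	intel_power_domains_init_hw(dev_priv, true);
 */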
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	pm_runtime_get_sync(kdev);

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
}

/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	if (IS_ENABLED(CONFIG_PM)) {
		int ret = pm_runtime_get_if_in_use(kdev);

		/*
		 * If runtime PM is disabled by the RPM core we get an -EINVAL
		 * return value, in which case we are not supposed to have
		 * called this function at all, since the power state is
		 * undefined. This currently applies to the late/early system
		 * suspend/resume handlers.
		 */
		WARN_ON_ONCE(ret < 0);
		if (ret <= 0)
			return false;
	}

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);

	return true;
}

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	pm_runtime_get_noresume(kdev);

	atomic_inc(&dev_priv->pm.wakeref_count);
}

/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
		atomic_inc(&dev_priv->pm.atomic_seq);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}
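/*
 * Illustrative sketch only: runtime pm references follow the same get/put
 * discipline as the display power domain references above;
 * touch_gt_registers() is a hypothetical stand-in for work that needs the
 * device awake.
 *
 *	intel_runtime_pm_get(dev_priv);
 *	touch_gt_registers(dev_priv);
 *	intel_runtime_pm_put(dev_priv);
 */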
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct drm_device *dev = &dev_priv->drm;
	struct device *kdev = &pdev->dev;

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!HAS_RUNTIME_PM(dev)) {
		pm_runtime_dont_use_autosuspend(kdev);
		pm_runtime_get_sync(kdev);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}