1 /* 2 * Copyright © 2012-2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eugeni Dodonov <eugeni.dodonov@intel.com> 25 * Daniel Vetter <daniel.vetter@ffwll.ch> 26 * 27 */ 28 29 #include <linux/pm_runtime.h> 30 #include <linux/vgaarb.h> 31 32 #include "i915_drv.h" 33 #include "intel_drv.h" 34 #include <drm/i915_powerwell.h> 35 36 /** 37 * DOC: runtime pm 38 * 39 * The i915 driver supports dynamic enabling and disabling of entire hardware 40 * blocks at runtime. This is especially important on the display side where 41 * software is supposed to control many power gates manually on recent hardware, 42 * since on the GT side a lot of the power management is done by the hardware. 43 * But even there some manual control at the device level is required. 
44 * 45 * Since i915 supports a diverse set of platforms with a unified codebase and 46 * hardware engineers just love to shuffle functionality around between power 47 * domains there's a sizeable amount of indirection required. This file provides 48 * generic functions to the driver for grabbing and releasing references for 49 * abstract power domains. It then maps those to the actual power wells 50 * present for a given platform. 51 */ 52 53 static struct i915_power_domains *hsw_pwr; 54 55 #define for_each_power_well(i, power_well, domain_mask, power_domains) \ 56 for (i = 0; \ 57 i < (power_domains)->power_well_count && \ 58 ((power_well) = &(power_domains)->power_wells[i]); \ 59 i++) \ 60 if ((power_well)->domains & (domain_mask)) 61 62 #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \ 63 for (i = (power_domains)->power_well_count - 1; \ 64 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\ 65 i--) \ 66 if ((power_well)->domains & (domain_mask)) 67 68 /* 69 * We should only use the power well if we explicitly asked the hardware to 70 * enable it, so check if it's enabled and also check if we've requested it to 71 * be enabled. 72 */ 73 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, 74 struct i915_power_well *power_well) 75 { 76 return I915_READ(HSW_PWR_WELL_DRIVER) == 77 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); 78 } 79 80 /** 81 * __intel_display_power_is_enabled - unlocked check for a power domain 82 * @dev_priv: i915 device instance 83 * @domain: power domain to check 84 * 85 * This is the unlocked version of intel_display_power_is_enabled() and should 86 * only be used from error capture and recovery code where deadlocks are 87 * possible. 88 * 89 * Returns: 90 * True when the power domain is enabled, false otherwise. 
91 */ 92 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 93 enum intel_display_power_domain domain) 94 { 95 struct i915_power_domains *power_domains; 96 struct i915_power_well *power_well; 97 bool is_enabled; 98 int i; 99 100 if (dev_priv->pm.suspended) 101 return false; 102 103 power_domains = &dev_priv->power_domains; 104 105 is_enabled = true; 106 107 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { 108 if (power_well->always_on) 109 continue; 110 111 if (!power_well->hw_enabled) { 112 is_enabled = false; 113 break; 114 } 115 } 116 117 return is_enabled; 118 } 119 120 /** 121 * intel_display_power_is_enabled - unlocked check for a power domain 122 * @dev_priv: i915 device instance 123 * @domain: power domain to check 124 * 125 * This function can be used to check the hw power domain state. It is mostly 126 * used in hardware state readout functions. Everywhere else code should rely 127 * upon explicit power domain reference counting to ensure that the hardware 128 * block is powered up before accessing it. 129 * 130 * Callers must hold the relevant modesetting locks to ensure that concurrent 131 * threads can't disable the power well while the caller tries to read a few 132 * registers. 133 * 134 * Returns: 135 * True when the power domain is enabled, false otherwise. 
136 */ 137 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 138 enum intel_display_power_domain domain) 139 { 140 struct i915_power_domains *power_domains; 141 bool ret; 142 143 power_domains = &dev_priv->power_domains; 144 145 mutex_lock(&power_domains->lock); 146 ret = __intel_display_power_is_enabled(dev_priv, domain); 147 mutex_unlock(&power_domains->lock); 148 149 return ret; 150 } 151 152 /** 153 * intel_display_set_init_power - set the initial power domain state 154 * @dev_priv: i915 device instance 155 * @enable: whether to enable or disable the initial power domain state 156 * 157 * For simplicity our driver load/unload and system suspend/resume code assumes 158 * that all power domains are always enabled. This functions controls the state 159 * of this little hack. While the initial power domain state is enabled runtime 160 * pm is effectively disabled. 161 */ 162 void intel_display_set_init_power(struct drm_i915_private *dev_priv, 163 bool enable) 164 { 165 if (dev_priv->power_domains.init_power_on == enable) 166 return; 167 168 if (enable) 169 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 170 else 171 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 172 173 dev_priv->power_domains.init_power_on = enable; 174 } 175 176 /* 177 * Starting with Haswell, we have a "Power Down Well" that can be turned off 178 * when not needed anymore. We have 4 registers that can request the power well 179 * to be enabled, and it will only be disabled if none of the registers is 180 * requesting it to be enabled. 181 */ 182 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) 183 { 184 struct drm_device *dev = dev_priv->dev; 185 186 /* 187 * After we re-enable the power well, if we touch VGA register 0x3d5 188 * we'll get unclaimed register interrupts. This stops after we write 189 * anything to the VGA MSR register. 
The vgacon module uses this 190 * register all the time, so if we unbind our driver and, as a 191 * consequence, bind vgacon, we'll get stuck in an infinite loop at 192 * console_unlock(). So make here we touch the VGA MSR register, making 193 * sure vgacon can keep working normally without triggering interrupts 194 * and error messages. 195 */ 196 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 197 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); 198 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 199 200 if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) 201 gen8_irq_power_well_post_enable(dev_priv); 202 } 203 204 static void hsw_set_power_well(struct drm_i915_private *dev_priv, 205 struct i915_power_well *power_well, bool enable) 206 { 207 bool is_enabled, enable_requested; 208 uint32_t tmp; 209 210 tmp = I915_READ(HSW_PWR_WELL_DRIVER); 211 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; 212 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; 213 214 if (enable) { 215 if (!enable_requested) 216 I915_WRITE(HSW_PWR_WELL_DRIVER, 217 HSW_PWR_WELL_ENABLE_REQUEST); 218 219 if (!is_enabled) { 220 DRM_DEBUG_KMS("Enabling power well\n"); 221 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & 222 HSW_PWR_WELL_STATE_ENABLED), 20)) 223 DRM_ERROR("Timeout enabling power well\n"); 224 hsw_power_well_post_enable(dev_priv); 225 } 226 227 } else { 228 if (enable_requested) { 229 I915_WRITE(HSW_PWR_WELL_DRIVER, 0); 230 POSTING_READ(HSW_PWR_WELL_DRIVER); 231 DRM_DEBUG_KMS("Requesting to disable the power well\n"); 232 } 233 } 234 } 235 236 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, 237 struct i915_power_well *power_well) 238 { 239 hsw_set_power_well(dev_priv, power_well, power_well->count > 0); 240 241 /* 242 * We're taking over the BIOS, so clear any requests made by it since 243 * the driver is in charge now. 
244 */ 245 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) 246 I915_WRITE(HSW_PWR_WELL_BIOS, 0); 247 } 248 249 static void hsw_power_well_enable(struct drm_i915_private *dev_priv, 250 struct i915_power_well *power_well) 251 { 252 hsw_set_power_well(dev_priv, power_well, true); 253 } 254 255 static void hsw_power_well_disable(struct drm_i915_private *dev_priv, 256 struct i915_power_well *power_well) 257 { 258 hsw_set_power_well(dev_priv, power_well, false); 259 } 260 261 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, 262 struct i915_power_well *power_well) 263 { 264 } 265 266 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, 267 struct i915_power_well *power_well) 268 { 269 return true; 270 } 271 272 static void vlv_set_power_well(struct drm_i915_private *dev_priv, 273 struct i915_power_well *power_well, bool enable) 274 { 275 enum punit_power_well power_well_id = power_well->data; 276 u32 mask; 277 u32 state; 278 u32 ctrl; 279 280 mask = PUNIT_PWRGT_MASK(power_well_id); 281 state = enable ? 
PUNIT_PWRGT_PWR_ON(power_well_id) : 282 PUNIT_PWRGT_PWR_GATE(power_well_id); 283 284 mutex_lock(&dev_priv->rps.hw_lock); 285 286 #define COND \ 287 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) 288 289 if (COND) 290 goto out; 291 292 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); 293 ctrl &= ~mask; 294 ctrl |= state; 295 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); 296 297 if (wait_for(COND, 100)) 298 DRM_ERROR("timout setting power well state %08x (%08x)\n", 299 state, 300 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); 301 302 #undef COND 303 304 out: 305 mutex_unlock(&dev_priv->rps.hw_lock); 306 } 307 308 static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv, 309 struct i915_power_well *power_well) 310 { 311 vlv_set_power_well(dev_priv, power_well, power_well->count > 0); 312 } 313 314 static void vlv_power_well_enable(struct drm_i915_private *dev_priv, 315 struct i915_power_well *power_well) 316 { 317 vlv_set_power_well(dev_priv, power_well, true); 318 } 319 320 static void vlv_power_well_disable(struct drm_i915_private *dev_priv, 321 struct i915_power_well *power_well) 322 { 323 vlv_set_power_well(dev_priv, power_well, false); 324 } 325 326 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, 327 struct i915_power_well *power_well) 328 { 329 int power_well_id = power_well->data; 330 bool enabled = false; 331 u32 mask; 332 u32 state; 333 u32 ctrl; 334 335 mask = PUNIT_PWRGT_MASK(power_well_id); 336 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id); 337 338 mutex_lock(&dev_priv->rps.hw_lock); 339 340 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; 341 /* 342 * We only ever set the power-on and power-gate states, anything 343 * else is unexpected. 
344 */ 345 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) && 346 state != PUNIT_PWRGT_PWR_GATE(power_well_id)); 347 if (state == ctrl) 348 enabled = true; 349 350 /* 351 * A transient state at this point would mean some unexpected party 352 * is poking at the power controls too. 353 */ 354 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; 355 WARN_ON(ctrl != state); 356 357 mutex_unlock(&dev_priv->rps.hw_lock); 358 359 return enabled; 360 } 361 362 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, 363 struct i915_power_well *power_well) 364 { 365 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D); 366 367 vlv_set_power_well(dev_priv, power_well, true); 368 369 spin_lock_irq(&dev_priv->irq_lock); 370 valleyview_enable_display_irqs(dev_priv); 371 spin_unlock_irq(&dev_priv->irq_lock); 372 373 /* 374 * During driver initialization/resume we can avoid restoring the 375 * part of the HW/SW state that will be inited anyway explicitly. 376 */ 377 if (dev_priv->power_domains.initializing) 378 return; 379 380 intel_hpd_init(dev_priv); 381 382 i915_redisable_vga_power_on(dev_priv->dev); 383 } 384 385 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, 386 struct i915_power_well *power_well) 387 { 388 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D); 389 390 spin_lock_irq(&dev_priv->irq_lock); 391 valleyview_disable_display_irqs(dev_priv); 392 spin_unlock_irq(&dev_priv->irq_lock); 393 394 vlv_set_power_well(dev_priv, power_well, false); 395 396 vlv_power_sequencer_reset(dev_priv); 397 } 398 399 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 400 struct i915_power_well *power_well) 401 { 402 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); 403 404 /* 405 * Enable the CRI clock source so we can get at the 406 * display and the reference clock for VGA 407 * hotplug / manual detection. 
408 */ 409 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | 410 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); 411 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 412 413 vlv_set_power_well(dev_priv, power_well, true); 414 415 /* 416 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - 417 * 6. De-assert cmn_reset/side_reset. Same as VLV X0. 418 * a. GUnit 0x2110 bit[0] set to 1 (def 0) 419 * b. The other bits such as sfr settings / modesel may all 420 * be set to 0. 421 * 422 * This should only be done on init and resume from S3 with 423 * both PLLs disabled, or we risk losing DPIO and PLL 424 * synchronization. 425 */ 426 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); 427 } 428 429 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 430 struct i915_power_well *power_well) 431 { 432 enum pipe pipe; 433 434 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); 435 436 for_each_pipe(dev_priv, pipe) 437 assert_pll_disabled(dev_priv, pipe); 438 439 /* Assert common reset */ 440 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST); 441 442 vlv_set_power_well(dev_priv, power_well, false); 443 } 444 445 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 446 struct i915_power_well *power_well) 447 { 448 enum dpio_phy phy; 449 450 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && 451 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D); 452 453 /* 454 * Enable the CRI clock source so we can get at the 455 * display and the reference clock for VGA 456 * hotplug / manual detection. 
457 */ 458 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { 459 phy = DPIO_PHY0; 460 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | 461 DPLL_REFA_CLK_ENABLE_VLV); 462 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | 463 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); 464 } else { 465 phy = DPIO_PHY1; 466 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | 467 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); 468 } 469 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ 470 vlv_set_power_well(dev_priv, power_well, true); 471 472 /* Poll for phypwrgood signal */ 473 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1)) 474 DRM_ERROR("Display PHY %d is not power up\n", phy); 475 476 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) | 477 PHY_COM_LANE_RESET_DEASSERT(phy)); 478 } 479 480 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 481 struct i915_power_well *power_well) 482 { 483 enum dpio_phy phy; 484 485 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && 486 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D); 487 488 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { 489 phy = DPIO_PHY0; 490 assert_pll_disabled(dev_priv, PIPE_A); 491 assert_pll_disabled(dev_priv, PIPE_B); 492 } else { 493 phy = DPIO_PHY1; 494 assert_pll_disabled(dev_priv, PIPE_C); 495 } 496 497 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) & 498 ~PHY_COM_LANE_RESET_DEASSERT(phy)); 499 500 vlv_set_power_well(dev_priv, power_well, false); 501 } 502 503 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, 504 struct i915_power_well *power_well) 505 { 506 enum pipe pipe = power_well->data; 507 bool enabled; 508 u32 state, ctrl; 509 510 mutex_lock(&dev_priv->rps.hw_lock); 511 512 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe); 513 /* 514 * We only ever set the power-on and power-gate states, anything 515 * else is unexpected. 
516 */ 517 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe)); 518 enabled = state == DP_SSS_PWR_ON(pipe); 519 520 /* 521 * A transient state at this point would mean some unexpected party 522 * is poking at the power controls too. 523 */ 524 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe); 525 WARN_ON(ctrl << 16 != state); 526 527 mutex_unlock(&dev_priv->rps.hw_lock); 528 529 return enabled; 530 } 531 532 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, 533 struct i915_power_well *power_well, 534 bool enable) 535 { 536 enum pipe pipe = power_well->data; 537 u32 state; 538 u32 ctrl; 539 540 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); 541 542 mutex_lock(&dev_priv->rps.hw_lock); 543 544 #define COND \ 545 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) 546 547 if (COND) 548 goto out; 549 550 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 551 ctrl &= ~DP_SSC_MASK(pipe); 552 ctrl |= enable ? 
DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); 553 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl); 554 555 if (wait_for(COND, 100)) 556 DRM_ERROR("timout setting power well state %08x (%08x)\n", 557 state, 558 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ)); 559 560 #undef COND 561 562 out: 563 mutex_unlock(&dev_priv->rps.hw_lock); 564 } 565 566 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, 567 struct i915_power_well *power_well) 568 { 569 chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0); 570 } 571 572 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, 573 struct i915_power_well *power_well) 574 { 575 WARN_ON_ONCE(power_well->data != PIPE_A && 576 power_well->data != PIPE_B && 577 power_well->data != PIPE_C); 578 579 chv_set_pipe_power_well(dev_priv, power_well, true); 580 581 if (power_well->data == PIPE_A) { 582 spin_lock_irq(&dev_priv->irq_lock); 583 valleyview_enable_display_irqs(dev_priv); 584 spin_unlock_irq(&dev_priv->irq_lock); 585 586 /* 587 * During driver initialization/resume we can avoid restoring the 588 * part of the HW/SW state that will be inited anyway explicitly. 
589 */ 590 if (dev_priv->power_domains.initializing) 591 return; 592 593 intel_hpd_init(dev_priv); 594 595 i915_redisable_vga_power_on(dev_priv->dev); 596 } 597 } 598 599 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, 600 struct i915_power_well *power_well) 601 { 602 WARN_ON_ONCE(power_well->data != PIPE_A && 603 power_well->data != PIPE_B && 604 power_well->data != PIPE_C); 605 606 if (power_well->data == PIPE_A) { 607 spin_lock_irq(&dev_priv->irq_lock); 608 valleyview_disable_display_irqs(dev_priv); 609 spin_unlock_irq(&dev_priv->irq_lock); 610 } 611 612 chv_set_pipe_power_well(dev_priv, power_well, false); 613 614 if (power_well->data == PIPE_A) 615 vlv_power_sequencer_reset(dev_priv); 616 } 617 618 /** 619 * intel_display_power_get - grab a power domain reference 620 * @dev_priv: i915 device instance 621 * @domain: power domain to reference 622 * 623 * This function grabs a power domain reference for @domain and ensures that the 624 * power domain and all its parents are powered up. Therefore users should only 625 * grab a reference to the innermost power domain they need. 626 * 627 * Any power domain reference obtained by this function must have a symmetric 628 * call to intel_display_power_put() to release the reference again. 
629 */ 630 void intel_display_power_get(struct drm_i915_private *dev_priv, 631 enum intel_display_power_domain domain) 632 { 633 struct i915_power_domains *power_domains; 634 struct i915_power_well *power_well; 635 int i; 636 637 intel_runtime_pm_get(dev_priv); 638 639 power_domains = &dev_priv->power_domains; 640 641 mutex_lock(&power_domains->lock); 642 643 for_each_power_well(i, power_well, BIT(domain), power_domains) { 644 if (!power_well->count++) { 645 DRM_DEBUG_KMS("enabling %s\n", power_well->name); 646 power_well->ops->enable(dev_priv, power_well); 647 power_well->hw_enabled = true; 648 } 649 } 650 651 power_domains->domain_use_count[domain]++; 652 653 mutex_unlock(&power_domains->lock); 654 } 655 656 /** 657 * intel_display_power_put - release a power domain reference 658 * @dev_priv: i915 device instance 659 * @domain: power domain to reference 660 * 661 * This function drops the power domain reference obtained by 662 * intel_display_power_get() and might power down the corresponding hardware 663 * block right away if this is the last reference. 
664 */ 665 void intel_display_power_put(struct drm_i915_private *dev_priv, 666 enum intel_display_power_domain domain) 667 { 668 struct i915_power_domains *power_domains; 669 struct i915_power_well *power_well; 670 int i; 671 672 power_domains = &dev_priv->power_domains; 673 674 mutex_lock(&power_domains->lock); 675 676 WARN_ON(!power_domains->domain_use_count[domain]); 677 power_domains->domain_use_count[domain]--; 678 679 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { 680 WARN_ON(!power_well->count); 681 682 if (!--power_well->count && i915.disable_power_well) { 683 DRM_DEBUG_KMS("disabling %s\n", power_well->name); 684 power_well->hw_enabled = false; 685 power_well->ops->disable(dev_priv, power_well); 686 } 687 } 688 689 mutex_unlock(&power_domains->lock); 690 691 intel_runtime_pm_put(dev_priv); 692 } 693 694 #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1) 695 696 #define HSW_ALWAYS_ON_POWER_DOMAINS ( \ 697 BIT(POWER_DOMAIN_PIPE_A) | \ 698 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ 699 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ 700 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ 701 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 702 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 703 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ 704 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 705 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ 706 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ 707 BIT(POWER_DOMAIN_PORT_CRT) | \ 708 BIT(POWER_DOMAIN_PLLS) | \ 709 BIT(POWER_DOMAIN_INIT)) 710 #define HSW_DISPLAY_POWER_DOMAINS ( \ 711 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ 712 BIT(POWER_DOMAIN_INIT)) 713 714 #define BDW_ALWAYS_ON_POWER_DOMAINS ( \ 715 HSW_ALWAYS_ON_POWER_DOMAINS | \ 716 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER)) 717 #define BDW_DISPLAY_POWER_DOMAINS ( \ 718 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \ 719 BIT(POWER_DOMAIN_INIT)) 720 721 #define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT) 722 #define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK 723 724 #define 
VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 725 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 726 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 727 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ 728 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 729 BIT(POWER_DOMAIN_PORT_CRT) | \ 730 BIT(POWER_DOMAIN_INIT)) 731 732 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ 733 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 734 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 735 BIT(POWER_DOMAIN_INIT)) 736 737 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ 738 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 739 BIT(POWER_DOMAIN_INIT)) 740 741 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ 742 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ 743 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 744 BIT(POWER_DOMAIN_INIT)) 745 746 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ 747 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 748 BIT(POWER_DOMAIN_INIT)) 749 750 #define CHV_PIPE_A_POWER_DOMAINS ( \ 751 BIT(POWER_DOMAIN_PIPE_A) | \ 752 BIT(POWER_DOMAIN_INIT)) 753 754 #define CHV_PIPE_B_POWER_DOMAINS ( \ 755 BIT(POWER_DOMAIN_PIPE_B) | \ 756 BIT(POWER_DOMAIN_INIT)) 757 758 #define CHV_PIPE_C_POWER_DOMAINS ( \ 759 BIT(POWER_DOMAIN_PIPE_C) | \ 760 BIT(POWER_DOMAIN_INIT)) 761 762 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ 763 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 764 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 765 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ 766 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 767 BIT(POWER_DOMAIN_INIT)) 768 769 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ 770 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ 771 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ 772 BIT(POWER_DOMAIN_INIT)) 773 774 #define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \ 775 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ 776 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ 777 BIT(POWER_DOMAIN_INIT)) 778 779 #define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \ 780 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ 781 BIT(POWER_DOMAIN_INIT)) 782 783 static const struct i915_power_well_ops 
i9xx_always_on_power_well_ops = { 784 .sync_hw = i9xx_always_on_power_well_noop, 785 .enable = i9xx_always_on_power_well_noop, 786 .disable = i9xx_always_on_power_well_noop, 787 .is_enabled = i9xx_always_on_power_well_enabled, 788 }; 789 790 static const struct i915_power_well_ops chv_pipe_power_well_ops = { 791 .sync_hw = chv_pipe_power_well_sync_hw, 792 .enable = chv_pipe_power_well_enable, 793 .disable = chv_pipe_power_well_disable, 794 .is_enabled = chv_pipe_power_well_enabled, 795 }; 796 797 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { 798 .sync_hw = vlv_power_well_sync_hw, 799 .enable = chv_dpio_cmn_power_well_enable, 800 .disable = chv_dpio_cmn_power_well_disable, 801 .is_enabled = vlv_power_well_enabled, 802 }; 803 804 static struct i915_power_well i9xx_always_on_power_well[] = { 805 { 806 .name = "always-on", 807 .always_on = 1, 808 .domains = POWER_DOMAIN_MASK, 809 .ops = &i9xx_always_on_power_well_ops, 810 }, 811 }; 812 813 static const struct i915_power_well_ops hsw_power_well_ops = { 814 .sync_hw = hsw_power_well_sync_hw, 815 .enable = hsw_power_well_enable, 816 .disable = hsw_power_well_disable, 817 .is_enabled = hsw_power_well_enabled, 818 }; 819 820 static struct i915_power_well hsw_power_wells[] = { 821 { 822 .name = "always-on", 823 .always_on = 1, 824 .domains = HSW_ALWAYS_ON_POWER_DOMAINS, 825 .ops = &i9xx_always_on_power_well_ops, 826 }, 827 { 828 .name = "display", 829 .domains = HSW_DISPLAY_POWER_DOMAINS, 830 .ops = &hsw_power_well_ops, 831 }, 832 }; 833 834 static struct i915_power_well bdw_power_wells[] = { 835 { 836 .name = "always-on", 837 .always_on = 1, 838 .domains = BDW_ALWAYS_ON_POWER_DOMAINS, 839 .ops = &i9xx_always_on_power_well_ops, 840 }, 841 { 842 .name = "display", 843 .domains = BDW_DISPLAY_POWER_DOMAINS, 844 .ops = &hsw_power_well_ops, 845 }, 846 }; 847 848 static const struct i915_power_well_ops vlv_display_power_well_ops = { 849 .sync_hw = vlv_power_well_sync_hw, 850 .enable = 
vlv_display_power_well_enable, 851 .disable = vlv_display_power_well_disable, 852 .is_enabled = vlv_power_well_enabled, 853 }; 854 855 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { 856 .sync_hw = vlv_power_well_sync_hw, 857 .enable = vlv_dpio_cmn_power_well_enable, 858 .disable = vlv_dpio_cmn_power_well_disable, 859 .is_enabled = vlv_power_well_enabled, 860 }; 861 862 static const struct i915_power_well_ops vlv_dpio_power_well_ops = { 863 .sync_hw = vlv_power_well_sync_hw, 864 .enable = vlv_power_well_enable, 865 .disable = vlv_power_well_disable, 866 .is_enabled = vlv_power_well_enabled, 867 }; 868 869 static struct i915_power_well vlv_power_wells[] = { 870 { 871 .name = "always-on", 872 .always_on = 1, 873 .domains = VLV_ALWAYS_ON_POWER_DOMAINS, 874 .ops = &i9xx_always_on_power_well_ops, 875 }, 876 { 877 .name = "display", 878 .domains = VLV_DISPLAY_POWER_DOMAINS, 879 .data = PUNIT_POWER_WELL_DISP2D, 880 .ops = &vlv_display_power_well_ops, 881 }, 882 { 883 .name = "dpio-tx-b-01", 884 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 885 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 886 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 887 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 888 .ops = &vlv_dpio_power_well_ops, 889 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01, 890 }, 891 { 892 .name = "dpio-tx-b-23", 893 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 894 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 895 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 896 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 897 .ops = &vlv_dpio_power_well_ops, 898 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23, 899 }, 900 { 901 .name = "dpio-tx-c-01", 902 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 903 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 904 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 905 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 906 .ops = &vlv_dpio_power_well_ops, 907 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01, 908 }, 909 { 910 .name = "dpio-tx-c-23", 911 .domains = 
VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 912 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 913 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 914 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 915 .ops = &vlv_dpio_power_well_ops, 916 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23, 917 }, 918 { 919 .name = "dpio-common", 920 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, 921 .data = PUNIT_POWER_WELL_DPIO_CMN_BC, 922 .ops = &vlv_dpio_cmn_power_well_ops, 923 }, 924 }; 925 926 static struct i915_power_well chv_power_wells[] = { 927 { 928 .name = "always-on", 929 .always_on = 1, 930 .domains = VLV_ALWAYS_ON_POWER_DOMAINS, 931 .ops = &i9xx_always_on_power_well_ops, 932 }, 933 #if 0 934 { 935 .name = "display", 936 .domains = VLV_DISPLAY_POWER_DOMAINS, 937 .data = PUNIT_POWER_WELL_DISP2D, 938 .ops = &vlv_display_power_well_ops, 939 }, 940 #endif 941 { 942 .name = "pipe-a", 943 /* 944 * FIXME: pipe A power well seems to be the new disp2d well. 945 * At least all registers seem to be housed there. Figure 946 * out if this a a temporary situation in pre-production 947 * hardware or a permanent state of affairs. 948 */ 949 .domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS, 950 .data = PIPE_A, 951 .ops = &chv_pipe_power_well_ops, 952 }, 953 #if 0 954 { 955 .name = "pipe-b", 956 .domains = CHV_PIPE_B_POWER_DOMAINS, 957 .data = PIPE_B, 958 .ops = &chv_pipe_power_well_ops, 959 }, 960 { 961 .name = "pipe-c", 962 .domains = CHV_PIPE_C_POWER_DOMAINS, 963 .data = PIPE_C, 964 .ops = &chv_pipe_power_well_ops, 965 }, 966 #endif 967 { 968 .name = "dpio-common-bc", 969 /* 970 * XXX: cmnreset for one PHY seems to disturb the other. 971 * As a workaround keep both powered on at the same 972 * time for now. 973 */ 974 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS, 975 .data = PUNIT_POWER_WELL_DPIO_CMN_BC, 976 .ops = &chv_dpio_cmn_power_well_ops, 977 }, 978 { 979 .name = "dpio-common-d", 980 /* 981 * XXX: cmnreset for one PHY seems to disturb the other. 
982 * As a workaround keep both powered on at the same 983 * time for now. 984 */ 985 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS, 986 .data = PUNIT_POWER_WELL_DPIO_CMN_D, 987 .ops = &chv_dpio_cmn_power_well_ops, 988 }, 989 #if 0 990 { 991 .name = "dpio-tx-b-01", 992 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 993 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS, 994 .ops = &vlv_dpio_power_well_ops, 995 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01, 996 }, 997 { 998 .name = "dpio-tx-b-23", 999 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 1000 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS, 1001 .ops = &vlv_dpio_power_well_ops, 1002 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23, 1003 }, 1004 { 1005 .name = "dpio-tx-c-01", 1006 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 1007 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 1008 .ops = &vlv_dpio_power_well_ops, 1009 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01, 1010 }, 1011 { 1012 .name = "dpio-tx-c-23", 1013 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 1014 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 1015 .ops = &vlv_dpio_power_well_ops, 1016 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23, 1017 }, 1018 { 1019 .name = "dpio-tx-d-01", 1020 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS | 1021 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS, 1022 .ops = &vlv_dpio_power_well_ops, 1023 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01, 1024 }, 1025 { 1026 .name = "dpio-tx-d-23", 1027 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS | 1028 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS, 1029 .ops = &vlv_dpio_power_well_ops, 1030 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23, 1031 }, 1032 #endif 1033 }; 1034 1035 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv, 1036 enum punit_power_well power_well_id) 1037 { 1038 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1039 struct i915_power_well *power_well; 1040 int i; 1041 1042 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { 
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

/* Point @power_domains at a platform's power well table and record its size. */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
		/* hsw_pwr exposes the power well to the audio driver. */
		hsw_pwr = power_domains;
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		/*
		 * CHV must be checked before VLV: IS_VALLEYVIEW() is also
		 * true on CHV (cf. the !IS_CHERRYVIEW() exclusion in
		 * intel_power_domains_init_hw()).
		 */
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}

/* Disable runtime pm, keeping the device powered for driver unload. */
static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/*
	 * Runtime pm is only ever enabled when RC6 is available (see
	 * intel_runtime_pm_enable()), so there's nothing to undo otherwise.
	 */
	if (!intel_enable_rc6(dev))
		return;

	/*
	 * Make sure we're not suspended first.
	 */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}

/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);

	/* Cut off the audio driver's power well interface. */
	hsw_pwr = NULL;
}

/*
 * Re-sync software state with the hardware for every power well: run each
 * well's ->sync_hw() hook and cache its current enabled state.
 */
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

/*
 * VLV workaround: unless the display is already fully up, toggle the
 * display PHY side reset by power gating the common lane well below.
 *
 * NOTE(review): assumes lookup_power_well() finds both wells on VLV; the
 * returned pointers are dereferenced without a NULL check — confirm the
 * vlv table always contains DPIO_CMN_BC and DISP2D.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv,
			    disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	/* IS_VALLEYVIEW() is also true on CHV, hence the explicit exclusion. */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}

/**
 * intel_aux_display_runtime_get - grab an auxiliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a power domain reference for the auxiliary power domain
 * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
 * parents are powered up. Therefore users should only grab a reference to the
 * innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_aux_display_runtime_put() to release the reference again.
 */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	/* Currently implemented as a plain device-level runtime pm reference. */
	intel_runtime_pm_get(dev_priv);
}

/**
 * intel_aux_display_runtime_put - release an auxiliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function drops the auxiliary power domain reference obtained by
 * intel_aux_display_runtime_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/*
	 * get_sync() resumes the device if needed; it must not be marked
	 * suspended once this returns.
	 */
	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this functions from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks.
 * That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/* Caller promised the device is awake; just take the reference. */
	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
	pm_runtime_get_noresume(device);
}

/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/* Defer the actual suspend through the autosuspend timeout. */
	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	/* Drop the reference so the device may runtime suspend from now on. */
	pm_runtime_put_autosuspend(device);
}

/* Display audio driver power well request */
int i915_request_power_well(void)
{
	struct drm_i915_private *dev_priv;

	/* hsw_pwr is only set on HSW/BDW; report -ENODEV elsewhere. */
	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);

/* Display audio driver power well release */
int i915_release_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);

/*
 * Private interface for the audio driver to get CDCLK in kHz.
 *
 * Caller must request power well using i915_request_power_well() prior to
 * making the call.
 */
int i915_get_cdclk_freq(void)
{
	struct drm_i915_private *dev_priv;

	/* Only available on HSW/BDW, where hsw_pwr is initialized. */
	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);

	return intel_ddi_get_cdclk_freq(dev_priv);
}
EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);