1 /* SPDX-License-Identifier: MIT */ 2 /* 3 * Copyright © 2019 Intel Corporation 4 */ 5 6 #include <linux/string_helpers.h> 7 8 #include "i915_drv.h" 9 #include "i915_irq.h" 10 #include "i915_reg.h" 11 #include "intel_backlight_regs.h" 12 #include "intel_cdclk.h" 13 #include "intel_combo_phy.h" 14 #include "intel_de.h" 15 #include "intel_display_power.h" 16 #include "intel_display_power_map.h" 17 #include "intel_display_power_well.h" 18 #include "intel_display_types.h" 19 #include "intel_dmc.h" 20 #include "intel_mchbar_regs.h" 21 #include "intel_pch_refclk.h" 22 #include "intel_pcode.h" 23 #include "intel_pps_regs.h" 24 #include "intel_snps_phy.h" 25 #include "skl_watermark.h" 26 #include "skl_watermark_regs.h" 27 #include "vlv_sideband.h" 28 29 #define for_each_power_domain_well(__dev_priv, __power_well, __domain) \ 30 for_each_power_well(__dev_priv, __power_well) \ 31 for_each_if(test_bit((__domain), (__power_well)->domains.bits)) 32 33 #define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \ 34 for_each_power_well_reverse(__dev_priv, __power_well) \ 35 for_each_if(test_bit((__domain), (__power_well)->domains.bits)) 36 37 const char * 38 intel_display_power_domain_str(enum intel_display_power_domain domain) 39 { 40 switch (domain) { 41 case POWER_DOMAIN_DISPLAY_CORE: 42 return "DISPLAY_CORE"; 43 case POWER_DOMAIN_PIPE_A: 44 return "PIPE_A"; 45 case POWER_DOMAIN_PIPE_B: 46 return "PIPE_B"; 47 case POWER_DOMAIN_PIPE_C: 48 return "PIPE_C"; 49 case POWER_DOMAIN_PIPE_D: 50 return "PIPE_D"; 51 case POWER_DOMAIN_PIPE_PANEL_FITTER_A: 52 return "PIPE_PANEL_FITTER_A"; 53 case POWER_DOMAIN_PIPE_PANEL_FITTER_B: 54 return "PIPE_PANEL_FITTER_B"; 55 case POWER_DOMAIN_PIPE_PANEL_FITTER_C: 56 return "PIPE_PANEL_FITTER_C"; 57 case POWER_DOMAIN_PIPE_PANEL_FITTER_D: 58 return "PIPE_PANEL_FITTER_D"; 59 case POWER_DOMAIN_TRANSCODER_A: 60 return "TRANSCODER_A"; 61 case POWER_DOMAIN_TRANSCODER_B: 62 return "TRANSCODER_B"; 63 case POWER_DOMAIN_TRANSCODER_C: 64 return "TRANSCODER_C"; 65 case POWER_DOMAIN_TRANSCODER_D: 66 return "TRANSCODER_D"; 67 case POWER_DOMAIN_TRANSCODER_EDP: 68 return "TRANSCODER_EDP"; 69 case POWER_DOMAIN_TRANSCODER_DSI_A: 70 return "TRANSCODER_DSI_A"; 71 case POWER_DOMAIN_TRANSCODER_DSI_C: 72 return "TRANSCODER_DSI_C"; 73 case POWER_DOMAIN_TRANSCODER_VDSC_PW2: 74 return "TRANSCODER_VDSC_PW2"; 75 case POWER_DOMAIN_PORT_DDI_LANES_A: 76 return "PORT_DDI_LANES_A"; 77 case POWER_DOMAIN_PORT_DDI_LANES_B: 78 return "PORT_DDI_LANES_B"; 79 case POWER_DOMAIN_PORT_DDI_LANES_C: 80 return "PORT_DDI_LANES_C"; 81 case POWER_DOMAIN_PORT_DDI_LANES_D: 82 return "PORT_DDI_LANES_D"; 83 case POWER_DOMAIN_PORT_DDI_LANES_E: 84 return "PORT_DDI_LANES_E"; 85 case POWER_DOMAIN_PORT_DDI_LANES_F: 86 return "PORT_DDI_LANES_F"; 87 case POWER_DOMAIN_PORT_DDI_LANES_TC1: 88 return "PORT_DDI_LANES_TC1"; 89 case POWER_DOMAIN_PORT_DDI_LANES_TC2: 90 return "PORT_DDI_LANES_TC2"; 91 case POWER_DOMAIN_PORT_DDI_LANES_TC3: 92 return "PORT_DDI_LANES_TC3"; 93 case POWER_DOMAIN_PORT_DDI_LANES_TC4: 94 return "PORT_DDI_LANES_TC4"; 95 case POWER_DOMAIN_PORT_DDI_LANES_TC5: 96 return "PORT_DDI_LANES_TC5"; 97 case POWER_DOMAIN_PORT_DDI_LANES_TC6: 98 return "PORT_DDI_LANES_TC6"; 99 case POWER_DOMAIN_PORT_DDI_IO_A: 100 return "PORT_DDI_IO_A"; 101 case POWER_DOMAIN_PORT_DDI_IO_B: 102 return "PORT_DDI_IO_B"; 103 case POWER_DOMAIN_PORT_DDI_IO_C: 104 return "PORT_DDI_IO_C"; 105 case POWER_DOMAIN_PORT_DDI_IO_D: 106 return "PORT_DDI_IO_D"; 107 case POWER_DOMAIN_PORT_DDI_IO_E: 108 return "PORT_DDI_IO_E"; 109 case POWER_DOMAIN_PORT_DDI_IO_F: 110 return "PORT_DDI_IO_F"; 111 case POWER_DOMAIN_PORT_DDI_IO_TC1: 112 return "PORT_DDI_IO_TC1"; 113 case POWER_DOMAIN_PORT_DDI_IO_TC2: 114 return "PORT_DDI_IO_TC2"; 115 case POWER_DOMAIN_PORT_DDI_IO_TC3: 116 return "PORT_DDI_IO_TC3"; 117 case POWER_DOMAIN_PORT_DDI_IO_TC4: 118 return "PORT_DDI_IO_TC4"; 119 case POWER_DOMAIN_PORT_DDI_IO_TC5: 120 return "PORT_DDI_IO_TC5"; 121 case POWER_DOMAIN_PORT_DDI_IO_TC6: 122 return "PORT_DDI_IO_TC6"; 123 case POWER_DOMAIN_PORT_DSI: 124 return "PORT_DSI"; 125 case POWER_DOMAIN_PORT_CRT: 126 return "PORT_CRT"; 127 case POWER_DOMAIN_PORT_OTHER: 128 return "PORT_OTHER"; 129 case POWER_DOMAIN_VGA: 130 return "VGA"; 131 case POWER_DOMAIN_AUDIO_MMIO: 132 return "AUDIO_MMIO"; 133 case POWER_DOMAIN_AUDIO_PLAYBACK: 134 return "AUDIO_PLAYBACK"; 135 case POWER_DOMAIN_AUX_IO_A: 136 return "AUX_IO_A"; 137 case POWER_DOMAIN_AUX_IO_B: 138 return "AUX_IO_B"; 139 case POWER_DOMAIN_AUX_IO_C: 140 return "AUX_IO_C"; 141 case POWER_DOMAIN_AUX_IO_D: 142 return "AUX_IO_D"; 143 case POWER_DOMAIN_AUX_IO_E: 144 return "AUX_IO_E"; 145 case POWER_DOMAIN_AUX_IO_F: 146 return "AUX_IO_F"; 147 case POWER_DOMAIN_AUX_A: 148 return "AUX_A"; 149 case POWER_DOMAIN_AUX_B: 150 return "AUX_B"; 151 case POWER_DOMAIN_AUX_C: 152 return "AUX_C"; 153 case POWER_DOMAIN_AUX_D: 154 return "AUX_D"; 155 case POWER_DOMAIN_AUX_E: 156 return "AUX_E"; 157 case POWER_DOMAIN_AUX_F: 158 return "AUX_F"; 159 case POWER_DOMAIN_AUX_USBC1: 160 return "AUX_USBC1"; 161 case POWER_DOMAIN_AUX_USBC2: 162 return "AUX_USBC2"; 163 case POWER_DOMAIN_AUX_USBC3: 164 return "AUX_USBC3"; 165 case POWER_DOMAIN_AUX_USBC4: 166 return "AUX_USBC4"; 167 case POWER_DOMAIN_AUX_USBC5: 168 return "AUX_USBC5"; 169 case POWER_DOMAIN_AUX_USBC6: 170 return "AUX_USBC6"; 171 case POWER_DOMAIN_AUX_TBT1: 172 return "AUX_TBT1"; 173 case POWER_DOMAIN_AUX_TBT2: 174 return "AUX_TBT2"; 175 case POWER_DOMAIN_AUX_TBT3: 176 return "AUX_TBT3"; 177 case POWER_DOMAIN_AUX_TBT4: 178 return "AUX_TBT4"; 179 case POWER_DOMAIN_AUX_TBT5: 180 return "AUX_TBT5"; 181 case POWER_DOMAIN_AUX_TBT6: 182 return "AUX_TBT6"; 183 case POWER_DOMAIN_GMBUS: 184 return "GMBUS"; 185 case POWER_DOMAIN_INIT: 186 return "INIT"; 187 case POWER_DOMAIN_MODESET: 188 return "MODESET"; 189 case POWER_DOMAIN_GT_IRQ: 190 return "GT_IRQ"; 191 case POWER_DOMAIN_DC_OFF: 192 return "DC_OFF"; 193 case POWER_DOMAIN_TC_COLD_OFF: 194 return "TC_COLD_OFF"; 195 default: 196 MISSING_CASE(domain); 197 return "?"; 198 } 199 } 200 201 /** 202 * __intel_display_power_is_enabled - unlocked check for a power domain 203 * @dev_priv: i915 device instance 204 * @domain: power domain to check 205 * 206 * This is the unlocked version of intel_display_power_is_enabled() and should 207 * only be used from error capture and recovery code where deadlocks are 208 * possible. 209 * 210 * Returns: 211 * True when the power domain is enabled, false otherwise. 212 */ 213 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 214 enum intel_display_power_domain domain) 215 { 216 struct i915_power_well *power_well; 217 bool is_enabled; 218 219 if (dev_priv->runtime_pm.suspended) 220 return false; 221 222 is_enabled = true; 223 224 for_each_power_domain_well_reverse(dev_priv, power_well, domain) { 225 if (intel_power_well_is_always_on(power_well)) 226 continue; 227 228 if (!intel_power_well_is_enabled_cached(power_well)) { 229 is_enabled = false; 230 break; 231 } 232 } 233 234 return is_enabled; 235 } 236 237 /** 238 * intel_display_power_is_enabled - check for a power domain 239 * @dev_priv: i915 device instance 240 * @domain: power domain to check 241 * 242 * This function can be used to check the hw power domain state. It is mostly 243 * used in hardware state readout functions. Everywhere else code should rely 244 * upon explicit power domain reference counting to ensure that the hardware 245 * block is powered up before accessing it. 246 * 247 * Callers must hold the relevant modesetting locks to ensure that concurrent 248 * threads can't disable the power well while the caller tries to read a few 249 * registers. 250 * 251 * Returns: 252 * True when the power domain is enabled, false otherwise. 253 */ 254 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 255 enum intel_display_power_domain domain) 256 { 257 struct i915_power_domains *power_domains; 258 bool ret; 259 260 power_domains = &dev_priv->display.power.domains; 261 262 mutex_lock(&power_domains->lock); 263 ret = __intel_display_power_is_enabled(dev_priv, domain); 264 mutex_unlock(&power_domains->lock); 265 266 return ret; 267 } 268 269 static u32 270 sanitize_target_dc_state(struct drm_i915_private *i915, 271 u32 target_dc_state) 272 { 273 struct i915_power_domains *power_domains = &i915->display.power.domains; 274 static const u32 states[] = { 275 DC_STATE_EN_UPTO_DC6, 276 DC_STATE_EN_UPTO_DC5, 277 DC_STATE_EN_DC3CO, 278 DC_STATE_DISABLE, 279 }; 280 int i; 281 282 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { 283 if (target_dc_state != states[i]) 284 continue; 285 286 if (power_domains->allowed_dc_mask & target_dc_state) 287 break; 288 289 target_dc_state = states[i + 1]; 290 } 291 292 return target_dc_state; 293 } 294 295 /** 296 * intel_display_power_set_target_dc_state - Set target dc state. 297 * @dev_priv: i915 device 298 * @state: state which needs to be set as target_dc_state. 299 * 300 * This function set the "DC off" power well target_dc_state, 301 * based upon this target_dc_stste, "DC off" power well will 302 * enable desired DC state. 303 */ 304 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, 305 u32 state) 306 { 307 struct i915_power_well *power_well; 308 bool dc_off_enabled; 309 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 310 311 mutex_lock(&power_domains->lock); 312 power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF); 313 314 if (drm_WARN_ON(&dev_priv->drm, !power_well)) 315 goto unlock; 316 317 state = sanitize_target_dc_state(dev_priv, state); 318 319 if (state == power_domains->target_dc_state) 320 goto unlock; 321 322 dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well); 323 /* 324 * If DC off power well is disabled, need to enable and disable the 325 * DC off power well to effect target DC state. 326 */ 327 if (!dc_off_enabled) 328 intel_power_well_enable(dev_priv, power_well); 329 330 power_domains->target_dc_state = state; 331 332 if (!dc_off_enabled) 333 intel_power_well_disable(dev_priv, power_well); 334 335 unlock: 336 mutex_unlock(&power_domains->lock); 337 } 338 339 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) 340 341 static void __async_put_domains_mask(struct i915_power_domains *power_domains, 342 struct intel_power_domain_mask *mask) 343 { 344 bitmap_or(mask->bits, 345 power_domains->async_put_domains[0].bits, 346 power_domains->async_put_domains[1].bits, 347 POWER_DOMAIN_NUM); 348 } 349 350 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 351 352 static bool 353 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 354 { 355 struct drm_i915_private *i915 = container_of(power_domains, 356 struct drm_i915_private, 357 display.power.domains); 358 359 return !drm_WARN_ON(&i915->drm, 360 bitmap_intersects(power_domains->async_put_domains[0].bits, 361 power_domains->async_put_domains[1].bits, 362 POWER_DOMAIN_NUM)); 363 } 364 365 static bool 366 __async_put_domains_state_ok(struct i915_power_domains *power_domains) 367 { 368 struct drm_i915_private *i915 = container_of(power_domains, 369 struct drm_i915_private, 370 display.power.domains); 371 struct intel_power_domain_mask async_put_mask; 372 enum intel_display_power_domain domain; 373 bool err = false; 374 375 err |= !assert_async_put_domain_masks_disjoint(power_domains); 376 __async_put_domains_mask(power_domains, &async_put_mask); 377 err |= drm_WARN_ON(&i915->drm, 378 !!power_domains->async_put_wakeref != 379 !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM)); 380 381 for_each_power_domain(domain, &async_put_mask) 382 err |= drm_WARN_ON(&i915->drm, 383 power_domains->domain_use_count[domain] != 1); 384 385 return !err; 386 } 387 388 static void print_power_domains(struct i915_power_domains *power_domains, 389 const char *prefix, struct intel_power_domain_mask *mask) 390 { 391 struct drm_i915_private *i915 = container_of(power_domains, 392 struct drm_i915_private, 393 display.power.domains); 394 enum intel_display_power_domain domain; 395 396 drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM)); 397 for_each_power_domain(domain, mask) 398 drm_dbg(&i915->drm, "%s use_count %d\n", 399 intel_display_power_domain_str(domain), 400 power_domains->domain_use_count[domain]); 401 } 402 403 static void 404 print_async_put_domains_state(struct i915_power_domains *power_domains) 405 { 406 struct drm_i915_private *i915 = container_of(power_domains, 407 struct drm_i915_private, 408 display.power.domains); 409 410 drm_dbg(&i915->drm, "async_put_wakeref %u\n", 411 power_domains->async_put_wakeref); 412 413 print_power_domains(power_domains, "async_put_domains[0]", 414 &power_domains->async_put_domains[0]); 415 print_power_domains(power_domains, "async_put_domains[1]", 416 &power_domains->async_put_domains[1]); 417 } 418 419 static void 420 verify_async_put_domains_state(struct i915_power_domains *power_domains) 421 { 422 if (!__async_put_domains_state_ok(power_domains)) 423 print_async_put_domains_state(power_domains); 424 } 425 426 #else 427 428 static void 429 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 430 { 431 } 432 433 static void 434 verify_async_put_domains_state(struct i915_power_domains *power_domains) 435 { 436 } 437 438 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ 439 440 static void async_put_domains_mask(struct i915_power_domains *power_domains, 441 struct intel_power_domain_mask *mask) 442 443 { 444 assert_async_put_domain_masks_disjoint(power_domains); 445 446 __async_put_domains_mask(power_domains, mask); 447 } 448 449 static void 450 async_put_domains_clear_domain(struct i915_power_domains *power_domains, 451 enum intel_display_power_domain domain) 452 { 453 assert_async_put_domain_masks_disjoint(power_domains); 454 455 clear_bit(domain, power_domains->async_put_domains[0].bits); 456 clear_bit(domain, power_domains->async_put_domains[1].bits); 457 } 458 459 static bool 460 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, 461 enum intel_display_power_domain domain) 462 { 463 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 464 struct intel_power_domain_mask async_put_mask; 465 bool ret = false; 466 467 async_put_domains_mask(power_domains, &async_put_mask); 468 if (!test_bit(domain, async_put_mask.bits)) 469 goto out_verify; 470 471 async_put_domains_clear_domain(power_domains, domain); 472 473 ret = true; 474 475 async_put_domains_mask(power_domains, &async_put_mask); 476 if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM)) 477 goto out_verify; 478 479 cancel_delayed_work(&power_domains->async_put_work); 480 intel_runtime_pm_put_raw(&dev_priv->runtime_pm, 481 fetch_and_zero(&power_domains->async_put_wakeref)); 482 out_verify: 483 verify_async_put_domains_state(power_domains); 484 485 return ret; 486 } 487 488 static void 489 __intel_display_power_get_domain(struct drm_i915_private *dev_priv, 490 enum intel_display_power_domain domain) 491 { 492 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 493 struct i915_power_well *power_well; 494 495 if (intel_display_power_grab_async_put_ref(dev_priv, domain)) 496 return; 497 498 for_each_power_domain_well(dev_priv, power_well, domain) 499 intel_power_well_get(dev_priv, power_well); 500 501 power_domains->domain_use_count[domain]++; 502 } 503 504 /** 505 * intel_display_power_get - grab a power domain reference 506 * @dev_priv: i915 device instance 507 * @domain: power domain to reference 508 * 509 * This function grabs a power domain reference for @domain and ensures that the 510 * power domain and all its parents are powered up. Therefore users should only 511 * grab a reference to the innermost power domain they need. 512 * 513 * Any power domain reference obtained by this function must have a symmetric 514 * call to intel_display_power_put() to release the reference again. 515 */ 516 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, 517 enum intel_display_power_domain domain) 518 { 519 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 520 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 521 522 mutex_lock(&power_domains->lock); 523 __intel_display_power_get_domain(dev_priv, domain); 524 mutex_unlock(&power_domains->lock); 525 526 return wakeref; 527 } 528 529 /** 530 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain 531 * @dev_priv: i915 device instance 532 * @domain: power domain to reference 533 * 534 * This function grabs a power domain reference for @domain and ensures that the 535 * power domain and all its parents are powered up. Therefore users should only 536 * grab a reference to the innermost power domain they need. 537 * 538 * Any power domain reference obtained by this function must have a symmetric 539 * call to intel_display_power_put() to release the reference again. 540 */ 541 intel_wakeref_t 542 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, 543 enum intel_display_power_domain domain) 544 { 545 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 546 intel_wakeref_t wakeref; 547 bool is_enabled; 548 549 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); 550 if (!wakeref) 551 return false; 552 553 mutex_lock(&power_domains->lock); 554 555 if (__intel_display_power_is_enabled(dev_priv, domain)) { 556 __intel_display_power_get_domain(dev_priv, domain); 557 is_enabled = true; 558 } else { 559 is_enabled = false; 560 } 561 562 mutex_unlock(&power_domains->lock); 563 564 if (!is_enabled) { 565 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 566 wakeref = 0; 567 } 568 569 return wakeref; 570 } 571 572 static void 573 __intel_display_power_put_domain(struct drm_i915_private *dev_priv, 574 enum intel_display_power_domain domain) 575 { 576 struct i915_power_domains *power_domains; 577 struct i915_power_well *power_well; 578 const char *name = intel_display_power_domain_str(domain); 579 struct intel_power_domain_mask async_put_mask; 580 581 power_domains = &dev_priv->display.power.domains; 582 583 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], 584 "Use count on domain %s is already zero\n", 585 name); 586 async_put_domains_mask(power_domains, &async_put_mask); 587 drm_WARN(&dev_priv->drm, 588 test_bit(domain, async_put_mask.bits), 589 "Async disabling of domain %s is pending\n", 590 name); 591 592 power_domains->domain_use_count[domain]--; 593 594 for_each_power_domain_well_reverse(dev_priv, power_well, domain) 595 intel_power_well_put(dev_priv, power_well); 596 } 597 598 static void __intel_display_power_put(struct drm_i915_private *dev_priv, 599 enum intel_display_power_domain domain) 600 { 601 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 602 603 mutex_lock(&power_domains->lock); 604 __intel_display_power_put_domain(dev_priv, domain); 605 mutex_unlock(&power_domains->lock); 606 } 607 608 static void 609 queue_async_put_domains_work(struct i915_power_domains *power_domains, 610 intel_wakeref_t wakeref) 611 { 612 struct drm_i915_private *i915 = container_of(power_domains, 613 struct drm_i915_private, 614 display.power.domains); 615 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); 616 power_domains->async_put_wakeref = wakeref; 617 drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, 618 &power_domains->async_put_work, 619 msecs_to_jiffies(100))); 620 } 621 622 static void 623 release_async_put_domains(struct i915_power_domains *power_domains, 624 struct intel_power_domain_mask *mask) 625 { 626 struct drm_i915_private *dev_priv = 627 container_of(power_domains, struct drm_i915_private, 628 display.power.domains); 629 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 630 enum intel_display_power_domain domain; 631 intel_wakeref_t wakeref; 632 633 /* 634 * The caller must hold already raw wakeref, upgrade that to a proper 635 * wakeref to make the state checker happy about the HW access during 636 * power well disabling. 637 */ 638 assert_rpm_raw_wakeref_held(rpm); 639 wakeref = intel_runtime_pm_get(rpm); 640 641 for_each_power_domain(domain, mask) { 642 /* Clear before put, so put's sanity check is happy. */ 643 async_put_domains_clear_domain(power_domains, domain); 644 __intel_display_power_put_domain(dev_priv, domain); 645 } 646 647 intel_runtime_pm_put(rpm, wakeref); 648 } 649 650 static void 651 intel_display_power_put_async_work(struct work_struct *work) 652 { 653 struct drm_i915_private *dev_priv = 654 container_of(work, struct drm_i915_private, 655 display.power.domains.async_put_work.work); 656 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 657 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 658 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); 659 intel_wakeref_t old_work_wakeref = 0; 660 661 mutex_lock(&power_domains->lock); 662 663 /* 664 * Bail out if all the domain refs pending to be released were grabbed 665 * by subsequent gets or a flush_work. 666 */ 667 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 668 if (!old_work_wakeref) 669 goto out_verify; 670 671 release_async_put_domains(power_domains, 672 &power_domains->async_put_domains[0]); 673 674 /* Requeue the work if more domains were async put meanwhile. */ 675 if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) { 676 bitmap_copy(power_domains->async_put_domains[0].bits, 677 power_domains->async_put_domains[1].bits, 678 POWER_DOMAIN_NUM); 679 bitmap_zero(power_domains->async_put_domains[1].bits, 680 POWER_DOMAIN_NUM); 681 queue_async_put_domains_work(power_domains, 682 fetch_and_zero(&new_work_wakeref)); 683 } else { 684 /* 685 * Cancel the work that got queued after this one got dequeued, 686 * since here we released the corresponding async-put reference. 687 */ 688 cancel_delayed_work(&power_domains->async_put_work); 689 } 690 691 out_verify: 692 verify_async_put_domains_state(power_domains); 693 694 mutex_unlock(&power_domains->lock); 695 696 if (old_work_wakeref) 697 intel_runtime_pm_put_raw(rpm, old_work_wakeref); 698 if (new_work_wakeref) 699 intel_runtime_pm_put_raw(rpm, new_work_wakeref); 700 } 701 702 /** 703 * __intel_display_power_put_async - release a power domain reference asynchronously 704 * @i915: i915 device instance 705 * @domain: power domain to reference 706 * @wakeref: wakeref acquired for the reference that is being released 707 * 708 * This function drops the power domain reference obtained by 709 * intel_display_power_get*() and schedules a work to power down the 710 * corresponding hardware block if this is the last reference. 711 */ 712 void __intel_display_power_put_async(struct drm_i915_private *i915, 713 enum intel_display_power_domain domain, 714 intel_wakeref_t wakeref) 715 { 716 struct i915_power_domains *power_domains = &i915->display.power.domains; 717 struct intel_runtime_pm *rpm = &i915->runtime_pm; 718 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); 719 720 mutex_lock(&power_domains->lock); 721 722 if (power_domains->domain_use_count[domain] > 1) { 723 __intel_display_power_put_domain(i915, domain); 724 725 goto out_verify; 726 } 727 728 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); 729 730 /* Let a pending work requeue itself or queue a new one. */ 731 if (power_domains->async_put_wakeref) { 732 set_bit(domain, power_domains->async_put_domains[1].bits); 733 } else { 734 set_bit(domain, power_domains->async_put_domains[0].bits); 735 queue_async_put_domains_work(power_domains, 736 fetch_and_zero(&work_wakeref)); 737 } 738 739 out_verify: 740 verify_async_put_domains_state(power_domains); 741 742 mutex_unlock(&power_domains->lock); 743 744 if (work_wakeref) 745 intel_runtime_pm_put_raw(rpm, work_wakeref); 746 747 intel_runtime_pm_put(rpm, wakeref); 748 } 749 750 /** 751 * intel_display_power_flush_work - flushes the async display power disabling work 752 * @i915: i915 device instance 753 * 754 * Flushes any pending work that was scheduled by a preceding 755 * intel_display_power_put_async() call, completing the disabling of the 756 * corresponding power domains. 757 * 758 * Note that the work handler function may still be running after this 759 * function returns; to ensure that the work handler isn't running use 760 * intel_display_power_flush_work_sync() instead. 761 */ 762 void intel_display_power_flush_work(struct drm_i915_private *i915) 763 { 764 struct i915_power_domains *power_domains = &i915->display.power.domains; 765 struct intel_power_domain_mask async_put_mask; 766 intel_wakeref_t work_wakeref; 767 768 mutex_lock(&power_domains->lock); 769 770 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 771 if (!work_wakeref) 772 goto out_verify; 773 774 async_put_domains_mask(power_domains, &async_put_mask); 775 release_async_put_domains(power_domains, &async_put_mask); 776 cancel_delayed_work(&power_domains->async_put_work); 777 778 out_verify: 779 verify_async_put_domains_state(power_domains); 780 781 mutex_unlock(&power_domains->lock); 782 783 if (work_wakeref) 784 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); 785 } 786 787 /** 788 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work 789 * @i915: i915 device instance 790 * 791 * Like intel_display_power_flush_work(), but also ensure that the work 792 * handler function is not running any more when this function returns. 793 */ 794 static void 795 intel_display_power_flush_work_sync(struct drm_i915_private *i915) 796 { 797 struct i915_power_domains *power_domains = &i915->display.power.domains; 798 799 intel_display_power_flush_work(i915); 800 cancel_delayed_work_sync(&power_domains->async_put_work); 801 802 verify_async_put_domains_state(power_domains); 803 804 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); 805 } 806 807 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 808 /** 809 * intel_display_power_put - release a power domain reference 810 * @dev_priv: i915 device instance 811 * @domain: power domain to reference 812 * @wakeref: wakeref acquired for the reference that is being released 813 * 814 * This function drops the power domain reference obtained by 815 * intel_display_power_get() and might power down the corresponding hardware 816 * block right away if this is the last reference. 817 */ 818 void intel_display_power_put(struct drm_i915_private *dev_priv, 819 enum intel_display_power_domain domain, 820 intel_wakeref_t wakeref) 821 { 822 __intel_display_power_put(dev_priv, domain); 823 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 824 } 825 #else 826 /** 827 * intel_display_power_put_unchecked - release an unchecked power domain reference 828 * @dev_priv: i915 device instance 829 * @domain: power domain to reference 830 * 831 * This function drops the power domain reference obtained by 832 * intel_display_power_get() and might power down the corresponding hardware 833 * block right away if this is the last reference. 834 * 835 * This function is only for the power domain code's internal use to suppress wakeref 836 * tracking when the correspondig debug kconfig option is disabled, should not 837 * be used otherwise. 838 */ 839 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 840 enum intel_display_power_domain domain) 841 { 842 __intel_display_power_put(dev_priv, domain); 843 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); 844 } 845 #endif 846 847 void 848 intel_display_power_get_in_set(struct drm_i915_private *i915, 849 struct intel_display_power_domain_set *power_domain_set, 850 enum intel_display_power_domain domain) 851 { 852 intel_wakeref_t __maybe_unused wf; 853 854 drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits)); 855 856 wf = intel_display_power_get(i915, domain); 857 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 858 power_domain_set->wakerefs[domain] = wf; 859 #endif 860 set_bit(domain, power_domain_set->mask.bits); 861 } 862 863 bool 864 intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, 865 struct intel_display_power_domain_set *power_domain_set, 866 enum intel_display_power_domain domain) 867 { 868 intel_wakeref_t wf; 869 870 drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits)); 871 872 wf = intel_display_power_get_if_enabled(i915, domain); 873 if (!wf) 874 return false; 875 876 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 877 power_domain_set->wakerefs[domain] = wf; 878 #endif 879 set_bit(domain, power_domain_set->mask.bits); 880 881 return true; 882 } 883 884 void 885 intel_display_power_put_mask_in_set(struct drm_i915_private *i915, 886 struct intel_display_power_domain_set *power_domain_set, 887 struct intel_power_domain_mask *mask) 888 { 889 enum intel_display_power_domain domain; 890 891 drm_WARN_ON(&i915->drm, 892 !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM)); 893 894 for_each_power_domain(domain, mask) { 895 intel_wakeref_t __maybe_unused wf = -1; 896 897 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 898 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); 899 #endif 900 intel_display_power_put(i915, domain, wf); 901 clear_bit(domain, power_domain_set->mask.bits); 902 } 903 } 904 905 static int 906 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 907 int disable_power_well) 908 { 909 if (disable_power_well >= 0) 910 return !!disable_power_well; 911 912 return 1; 913 } 914 915 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, 916 int enable_dc) 917 { 918 u32 mask; 919 int requested_dc; 920 int max_dc; 921 922 if (!HAS_DISPLAY(dev_priv)) 923 return 0; 924 925 if (IS_DG2(dev_priv)) 926 max_dc = 1; 927 else if (IS_DG1(dev_priv)) 928 max_dc = 3; 929 else if (DISPLAY_VER(dev_priv) >= 12) 930 max_dc = 4; 931 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 932 max_dc = 1; 933 else if (DISPLAY_VER(dev_priv) >= 9) 934 max_dc = 2; 935 else 936 max_dc = 0; 937 938 /* 939 * DC9 has a separate HW flow from the rest of the DC states, 940 * not depending on the DMC firmware. It's needed by system 941 * suspend/resume, so allow it unconditionally. 942 */ 943 mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || 944 DISPLAY_VER(dev_priv) >= 11 ? 945 DC_STATE_EN_DC9 : 0; 946 947 if (!dev_priv->params.disable_power_well) 948 max_dc = 0; 949 950 if (enable_dc >= 0 && enable_dc <= max_dc) { 951 requested_dc = enable_dc; 952 } else if (enable_dc == -1) { 953 requested_dc = max_dc; 954 } else if (enable_dc > max_dc && enable_dc <= 4) { 955 drm_dbg_kms(&dev_priv->drm, 956 "Adjusting requested max DC state (%d->%d)\n", 957 enable_dc, max_dc); 958 requested_dc = max_dc; 959 } else { 960 drm_err(&dev_priv->drm, 961 "Unexpected value for enable_dc (%d)\n", enable_dc); 962 requested_dc = max_dc; 963 } 964 965 switch (requested_dc) { 966 case 4: 967 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; 968 break; 969 case 3: 970 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; 971 break; 972 case 2: 973 mask |= DC_STATE_EN_UPTO_DC6; 974 break; 975 case 1: 976 mask |= DC_STATE_EN_UPTO_DC5; 977 break; 978 } 979 980 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask); 981 982 return mask; 983 } 984 985 /** 986 * intel_power_domains_init - initializes the power domain structures 987 * @dev_priv: i915 device instance 988 * 989 * Initializes the power domain structures for @dev_priv depending upon the 990 * supported platform. 991 */ 992 int intel_power_domains_init(struct drm_i915_private *dev_priv) 993 { 994 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 995 996 dev_priv->params.disable_power_well = 997 sanitize_disable_power_well_option(dev_priv, 998 dev_priv->params.disable_power_well); 999 power_domains->allowed_dc_mask = 1000 get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); 1001 1002 power_domains->target_dc_state = 1003 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 1004 1005 mutex_init(&power_domains->lock); 1006 1007 INIT_DELAYED_WORK(&power_domains->async_put_work, 1008 intel_display_power_put_async_work); 1009 1010 return intel_display_power_map_init(power_domains); 1011 } 1012 1013 /** 1014 * intel_power_domains_cleanup - clean up power domains resources 1015 * @dev_priv: i915 device instance 1016 * 1017 * Release any resources acquired by intel_power_domains_init() 1018 */ 1019 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) 1020 { 1021 intel_display_power_map_cleanup(&dev_priv->display.power.domains); 1022 } 1023 1024 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) 1025 { 1026 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1027 struct i915_power_well *power_well; 1028 1029 mutex_lock(&power_domains->lock); 1030 for_each_power_well(dev_priv, power_well) 1031 intel_power_well_sync_hw(dev_priv, power_well); 1032 mutex_unlock(&power_domains->lock); 1033 } 1034 1035 static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv, 1036 enum dbuf_slice slice, bool enable) 1037 { 1038 i915_reg_t reg = DBUF_CTL_S(slice); 1039 bool state; 1040 1041 intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST, 1042 enable ? DBUF_POWER_REQUEST : 0); 1043 intel_de_posting_read(dev_priv, reg); 1044 udelay(10); 1045 1046 state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE; 1047 drm_WARN(&dev_priv->drm, enable != state, 1048 "DBuf slice %d power %s timeout!\n", 1049 slice, str_enable_disable(enable)); 1050 } 1051 1052 void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, 1053 u8 req_slices) 1054 { 1055 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1056 u8 slice_mask = DISPLAY_INFO(dev_priv)->dbuf.slice_mask; 1057 enum dbuf_slice slice; 1058 1059 drm_WARN(&dev_priv->drm, req_slices & ~slice_mask, 1060 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n", 1061 req_slices, slice_mask); 1062 1063 drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n", 1064 req_slices); 1065 1066 /* 1067 * Might be running this in parallel to gen9_dc_off_power_well_enable 1068 * being called from intel_dp_detect for instance, 1069 * which causes assertion triggered by race condition, 1070 * as gen9_assert_dbuf_enabled might preempt this when registers 1071 * were already updated, while dev_priv was not. 1072 */ 1073 mutex_lock(&power_domains->lock); 1074 1075 for_each_dbuf_slice(dev_priv, slice) 1076 gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice)); 1077 1078 dev_priv->display.dbuf.enabled_slices = req_slices; 1079 1080 mutex_unlock(&power_domains->lock); 1081 } 1082 1083 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) 1084 { 1085 dev_priv->display.dbuf.enabled_slices = 1086 intel_enabled_dbuf_slices_mask(dev_priv); 1087 1088 /* 1089 * Just power up at least 1 slice, we will 1090 * figure out later which slices we have and what we need. 1091 */ 1092 gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | 1093 dev_priv->display.dbuf.enabled_slices); 1094 } 1095 1096 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) 1097 { 1098 gen9_dbuf_slices_update(dev_priv, 0); 1099 } 1100 1101 static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) 1102 { 1103 enum dbuf_slice slice; 1104 1105 if (IS_ALDERLAKE_P(dev_priv)) 1106 return; 1107 1108 for_each_dbuf_slice(dev_priv, slice) 1109 intel_de_rmw(dev_priv, DBUF_CTL_S(slice), 1110 DBUF_TRACKER_STATE_SERVICE_MASK, 1111 DBUF_TRACKER_STATE_SERVICE(8)); 1112 } 1113 1114 static void icl_mbus_init(struct drm_i915_private *dev_priv) 1115 { 1116 unsigned long abox_regs = DISPLAY_INFO(dev_priv)->abox_mask; 1117 u32 mask, val, i; 1118 1119 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) 1120 return; 1121 1122 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | 1123 MBUS_ABOX_BT_CREDIT_POOL2_MASK | 1124 MBUS_ABOX_B_CREDIT_MASK | 1125 MBUS_ABOX_BW_CREDIT_MASK; 1126 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 1127 MBUS_ABOX_BT_CREDIT_POOL2(16) | 1128 MBUS_ABOX_B_CREDIT(1) | 1129 MBUS_ABOX_BW_CREDIT(1); 1130 1131 /* 1132 * gen12 platforms that use abox1 and abox2 for pixel data reads still 1133 * expect us to program the abox_ctl0 register as well, even though 1134 * we don't have to program other instance-0 registers like BW_BUDDY. 1135 */ 1136 if (DISPLAY_VER(dev_priv) == 12) 1137 abox_regs |= BIT(0); 1138 1139 for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) 1140 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val); 1141 } 1142 1143 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 1144 { 1145 u32 val = intel_de_read(dev_priv, LCPLL_CTL); 1146 1147 /* 1148 * The LCPLL register should be turned on by the BIOS. For now 1149 * let's just check its state and print errors in case 1150 * something is wrong. Don't even try to turn it on. 1151 */ 1152 1153 if (val & LCPLL_CD_SOURCE_FCLK) 1154 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); 1155 1156 if (val & LCPLL_PLL_DISABLE) 1157 drm_err(&dev_priv->drm, "LCPLL is disabled\n"); 1158 1159 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 1160 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); 1161 } 1162 1163 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 1164 { 1165 struct intel_crtc *crtc; 1166 1167 for_each_intel_crtc(&dev_priv->drm, crtc) 1168 I915_STATE_WARN(dev_priv, crtc->active, 1169 "CRTC for pipe %c enabled\n", 1170 pipe_name(crtc->pipe)); 1171 1172 I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), 1173 "Display power well on\n"); 1174 I915_STATE_WARN(dev_priv, 1175 intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, 1176 "SPLL enabled\n"); 1177 I915_STATE_WARN(dev_priv, 1178 intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 1179 "WRPLL1 enabled\n"); 1180 I915_STATE_WARN(dev_priv, 1181 intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 1182 "WRPLL2 enabled\n"); 1183 I915_STATE_WARN(dev_priv, 1184 intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, 1185 "Panel power on\n"); 1186 I915_STATE_WARN(dev_priv, 1187 intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 1188 "CPU PWM1 enabled\n"); 1189 if (IS_HASWELL(dev_priv)) 1190 I915_STATE_WARN(dev_priv, 1191 intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 1192 "CPU PWM2 enabled\n"); 1193 I915_STATE_WARN(dev_priv, 1194 intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 1195 "PCH PWM1 enabled\n"); 1196 I915_STATE_WARN(dev_priv, 1197 (intel_de_read(dev_priv, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), 1198 "Utility pin enabled in PWM mode\n"); 1199 I915_STATE_WARN(dev_priv, 1200 intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, 1201 "PCH GTC enabled\n"); 1202 1203 /* 1204 * In theory we can still leave IRQs enabled, as long as only the HPD 1205 * interrupts remain enabled. We used to check for that, but since it's 1206 * gen-specific and since we only disable LCPLL after we fully disable 1207 * the interrupts, the check below should be enough. 1208 */ 1209 I915_STATE_WARN(dev_priv, intel_irqs_enabled(dev_priv), 1210 "IRQs enabled\n"); 1211 } 1212 1213 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 1214 { 1215 if (IS_HASWELL(dev_priv)) 1216 return intel_de_read(dev_priv, D_COMP_HSW); 1217 else 1218 return intel_de_read(dev_priv, D_COMP_BDW); 1219 } 1220 1221 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 1222 { 1223 if (IS_HASWELL(dev_priv)) { 1224 if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val)) 1225 drm_dbg_kms(&dev_priv->drm, 1226 "Failed to write to D_COMP\n"); 1227 } else { 1228 intel_de_write(dev_priv, D_COMP_BDW, val); 1229 intel_de_posting_read(dev_priv, D_COMP_BDW); 1230 } 1231 } 1232 1233 /* 1234 * This function implements pieces of two sequences from BSpec: 1235 * - Sequence for display software to disable LCPLL 1236 * - Sequence for display software to allow package C8+ 1237 * The steps implemented here are just the steps that actually touch the LCPLL 1238 * register. Callers should take care of disabling all the display engine 1239 * functions, doing the mode unset, fixing interrupts, etc. 1240 */ 1241 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 1242 bool switch_to_fclk, bool allow_power_down) 1243 { 1244 u32 val; 1245 1246 assert_can_disable_lcpll(dev_priv); 1247 1248 val = intel_de_read(dev_priv, LCPLL_CTL); 1249 1250 if (switch_to_fclk) { 1251 val |= LCPLL_CD_SOURCE_FCLK; 1252 intel_de_write(dev_priv, LCPLL_CTL, val); 1253 1254 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & 1255 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 1256 drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); 1257 1258 val = intel_de_read(dev_priv, LCPLL_CTL); 1259 } 1260 1261 val |= LCPLL_PLL_DISABLE; 1262 intel_de_write(dev_priv, LCPLL_CTL, val); 1263 intel_de_posting_read(dev_priv, LCPLL_CTL); 1264 1265 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 1266 drm_err(&dev_priv->drm, "LCPLL still locked\n"); 1267 1268 val = hsw_read_dcomp(dev_priv); 1269 val |= D_COMP_COMP_DISABLE; 1270 hsw_write_dcomp(dev_priv, val); 1271 ndelay(100); 1272 1273 if (wait_for((hsw_read_dcomp(dev_priv) & 1274 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 1275 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); 1276 1277 if (allow_power_down) { 1278 intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW); 1279 intel_de_posting_read(dev_priv, LCPLL_CTL); 1280 } 1281 } 1282 1283 /* 1284 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 1285 * source. 1286 */ 1287 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 1288 { 1289 u32 val; 1290 1291 val = intel_de_read(dev_priv, LCPLL_CTL); 1292 1293 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 1294 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 1295 return; 1296 1297 /* 1298 * Make sure we're not on PC8 state before disabling PC8, otherwise 1299 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 1300 */ 1301 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 1302 1303 if (val & LCPLL_POWER_DOWN_ALLOW) { 1304 val &= ~LCPLL_POWER_DOWN_ALLOW; 1305 intel_de_write(dev_priv, LCPLL_CTL, val); 1306 intel_de_posting_read(dev_priv, LCPLL_CTL); 1307 } 1308 1309 val = hsw_read_dcomp(dev_priv); 1310 val |= D_COMP_COMP_FORCE; 1311 val &= ~D_COMP_COMP_DISABLE; 1312 hsw_write_dcomp(dev_priv, val); 1313 1314 val = intel_de_read(dev_priv, LCPLL_CTL); 1315 val &= ~LCPLL_PLL_DISABLE; 1316 intel_de_write(dev_priv, LCPLL_CTL, val); 1317 1318 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 1319 drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); 1320 1321 if (val & LCPLL_CD_SOURCE_FCLK) { 1322 intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); 1323 1324 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & 1325 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 1326 drm_err(&dev_priv->drm, 1327 "Switching back to LCPLL failed\n"); 1328 } 1329 1330 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 1331 1332 intel_update_cdclk(dev_priv); 1333 intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); 1334 } 1335 1336 /* 1337 * Package states C8 and deeper are really deep PC states that can only be 1338 * reached when all the devices on the system allow it, so even if the graphics 1339 * device allows PC8+, it doesn't mean the system will actually get to these 1340 * states. Our driver only allows PC8+ when going into runtime PM. 1341 * 1342 * The requirements for PC8+ are that all the outputs are disabled, the power 1343 * well is disabled and most interrupts are disabled, and these are also 1344 * requirements for runtime PM. When these conditions are met, we manually do 1345 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 1346 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard 1347 * hang the machine. 1348 * 1349 * When we really reach PC8 or deeper states (not just when we allow it) we lose 1350 * the state of some registers, so when we come back from PC8+ we need to 1351 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 1352 * need to take care of the registers kept by RC6. Notice that this happens even 1353 * if we don't put the device in PCI D3 state (which is what currently happens 1354 * because of the runtime PM support). 1355 * 1356 * For more, read "Display Sequences for Package C8" on the hardware 1357 * documentation. 1358 */ 1359 static void hsw_enable_pc8(struct drm_i915_private *dev_priv) 1360 { 1361 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n"); 1362 1363 if (HAS_PCH_LPT_LP(dev_priv)) 1364 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 1365 PCH_LP_PARTITION_LEVEL_DISABLE, 0); 1366 1367 lpt_disable_clkout_dp(dev_priv); 1368 hsw_disable_lcpll(dev_priv, true, true); 1369 } 1370 1371 static void hsw_disable_pc8(struct drm_i915_private *dev_priv) 1372 { 1373 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); 1374 1375 hsw_restore_lcpll(dev_priv); 1376 intel_init_pch_refclk(dev_priv); 1377 1378 if (HAS_PCH_LPT_LP(dev_priv)) 1379 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 1380 0, PCH_LP_PARTITION_LEVEL_DISABLE); 1381 } 1382 1383 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, 1384 bool enable) 1385 { 1386 i915_reg_t reg; 1387 u32 reset_bits; 1388 1389 if (IS_IVYBRIDGE(dev_priv)) { 1390 reg = GEN7_MSG_CTL; 1391 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; 1392 } else { 1393 reg = HSW_NDE_RSTWRN_OPT; 1394 reset_bits = RESET_PCH_HANDSHAKE_ENABLE; 1395 } 1396 1397 if (DISPLAY_VER(dev_priv) >= 14) 1398 reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN; 1399 1400 intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0); 1401 } 1402 1403 static void skl_display_core_init(struct drm_i915_private *dev_priv, 1404 bool resume) 1405 { 1406 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1407 struct i915_power_well *well; 1408 1409 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 1410 1411 /* enable PCH reset handshake */ 1412 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 1413 1414 if (!HAS_DISPLAY(dev_priv)) 1415 return; 1416 1417 /* enable PG1 and Misc I/O */ 1418 mutex_lock(&power_domains->lock); 1419 1420 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 1421 intel_power_well_enable(dev_priv, well); 1422 1423 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); 1424 intel_power_well_enable(dev_priv, well); 1425 1426 mutex_unlock(&power_domains->lock); 1427 1428 intel_cdclk_init_hw(dev_priv); 1429 1430 gen9_dbuf_enable(dev_priv); 1431 1432 if (resume) 1433 intel_dmc_load_program(dev_priv); 1434 } 1435 1436 static void skl_display_core_uninit(struct drm_i915_private *dev_priv) 1437 { 1438 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1439 struct i915_power_well *well; 1440 1441 if (!HAS_DISPLAY(dev_priv)) 1442 return; 1443 1444 gen9_disable_dc_states(dev_priv); 1445 /* TODO: disable DMC program */ 1446 1447 gen9_dbuf_disable(dev_priv); 1448 1449 intel_cdclk_uninit_hw(dev_priv); 1450 1451 /* The spec doesn't call for removing the reset handshake flag */ 1452 /* disable PG1 and Misc I/O */ 1453 1454 mutex_lock(&power_domains->lock); 1455 1456 /* 1457 * BSpec says to keep the MISC IO power well enabled here, only 1458 * remove our request for power well 1. 1459 * Note that even though the driver's request is removed power well 1 1460 * may stay enabled after this due to DMC's own request on it. 1461 */ 1462 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 1463 intel_power_well_disable(dev_priv, well); 1464 1465 mutex_unlock(&power_domains->lock); 1466 1467 usleep_range(10, 30); /* 10 us delay per Bspec */ 1468 } 1469 1470 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume) 1471 { 1472 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1473 struct i915_power_well *well; 1474 1475 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 1476 1477 /* 1478 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 1479 * or else the reset will hang because there is no PCH to respond. 1480 * Move the handshake programming to initialization sequence. 1481 * Previously was left up to BIOS. 1482 */ 1483 intel_pch_reset_handshake(dev_priv, false); 1484 1485 if (!HAS_DISPLAY(dev_priv)) 1486 return; 1487 1488 /* Enable PG1 */ 1489 mutex_lock(&power_domains->lock); 1490 1491 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 1492 intel_power_well_enable(dev_priv, well); 1493 1494 mutex_unlock(&power_domains->lock); 1495 1496 intel_cdclk_init_hw(dev_priv); 1497 1498 gen9_dbuf_enable(dev_priv); 1499 1500 if (resume) 1501 intel_dmc_load_program(dev_priv); 1502 } 1503 1504 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) 1505 { 1506 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1507 struct i915_power_well *well; 1508 1509 if (!HAS_DISPLAY(dev_priv)) 1510 return; 1511 1512 gen9_disable_dc_states(dev_priv); 1513 /* TODO: disable DMC program */ 1514 1515 gen9_dbuf_disable(dev_priv); 1516 1517 intel_cdclk_uninit_hw(dev_priv); 1518 1519 /* The spec doesn't call for removing the reset handshake flag */ 1520 1521 /* 1522 * Disable PW1 (PG1). 1523 * Note that even though the driver's request is removed power well 1 1524 * may stay enabled after this due to DMC's own request on it. 1525 */ 1526 mutex_lock(&power_domains->lock); 1527 1528 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 1529 intel_power_well_disable(dev_priv, well); 1530 1531 mutex_unlock(&power_domains->lock); 1532 1533 usleep_range(10, 30); /* 10 us delay per Bspec */ 1534 } 1535 1536 struct buddy_page_mask { 1537 u32 page_mask; 1538 u8 type; 1539 u8 num_channels; 1540 }; 1541 1542 static const struct buddy_page_mask tgl_buddy_page_masks[] = { 1543 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF }, 1544 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF }, 1545 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C }, 1546 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C }, 1547 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F }, 1548 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E }, 1549 { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 }, 1550 { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 }, 1551 {} 1552 }; 1553 1554 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = { 1555 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 }, 1556 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 }, 1557 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 }, 1558 { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 }, 1559 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 }, 1560 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 }, 1561 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 }, 1562 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 }, 1563 {} 1564 }; 1565 1566 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) 1567 { 1568 enum intel_dram_type type = dev_priv->dram_info.type; 1569 u8 num_channels = dev_priv->dram_info.num_channels; 1570 const struct buddy_page_mask *table; 1571 unsigned long abox_mask = DISPLAY_INFO(dev_priv)->abox_mask; 1572 int config, i; 1573 1574 /* BW_BUDDY registers are not used on dgpu's beyond DG1 */ 1575 if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv)) 1576 return; 1577 1578 if (IS_ALDERLAKE_S(dev_priv) || 1579 IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 1580 /* Wa_1409767108 */ 1581 table = wa_1409767108_buddy_page_masks; 1582 else 1583 table = tgl_buddy_page_masks; 1584 1585 for (config = 0; table[config].page_mask != 0; config++) 1586 if (table[config].num_channels == num_channels && 1587 table[config].type == type) 1588 break; 1589 1590 if (table[config].page_mask == 0) { 1591 drm_dbg(&dev_priv->drm, 1592 "Unknown memory configuration; disabling address buddy logic.\n"); 1593 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) 1594 intel_de_write(dev_priv, BW_BUDDY_CTL(i), 1595 BW_BUDDY_DISABLE); 1596 } else { 1597 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) { 1598 intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i), 1599 table[config].page_mask); 1600 1601 /* Wa_22010178259:tgl,dg1,rkl,adl-s */ 1602 if (DISPLAY_VER(dev_priv) == 12) 1603 intel_de_rmw(dev_priv, BW_BUDDY_CTL(i), 1604 BW_BUDDY_TLB_REQ_TIMER_MASK, 1605 BW_BUDDY_TLB_REQ_TIMER(0x8)); 1606 } 1607 } 1608 } 1609 1610 static void icl_display_core_init(struct drm_i915_private *dev_priv, 1611 bool resume) 1612 { 1613 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1614 struct i915_power_well *well; 1615 1616 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 1617 1618 /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */ 1619 if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && 1620 INTEL_PCH_TYPE(dev_priv) < PCH_DG1) 1621 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0, 1622 PCH_DPMGUNIT_CLOCK_GATE_DISABLE); 1623 1624 /* 1. Enable PCH reset handshake. */ 1625 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 1626 1627 if (!HAS_DISPLAY(dev_priv)) 1628 return; 1629 1630 /* 2. Initialize all combo phys */ 1631 intel_combo_phy_init(dev_priv); 1632 1633 /* 1634 * 3. Enable Power Well 1 (PG1). 1635 * The AUX IO power wells will be enabled on demand. 1636 */ 1637 mutex_lock(&power_domains->lock); 1638 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 1639 intel_power_well_enable(dev_priv, well); 1640 mutex_unlock(&power_domains->lock); 1641 1642 if (DISPLAY_VER(dev_priv) == 14) 1643 intel_de_rmw(dev_priv, DC_STATE_EN, 1644 HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0); 1645 1646 /* 4. Enable CDCLK. */ 1647 intel_cdclk_init_hw(dev_priv); 1648 1649 if (DISPLAY_VER(dev_priv) >= 12) 1650 gen12_dbuf_slices_config(dev_priv); 1651 1652 /* 5. Enable DBUF. */ 1653 gen9_dbuf_enable(dev_priv); 1654 1655 /* 6. Setup MBUS. */ 1656 icl_mbus_init(dev_priv); 1657 1658 /* 7. Program arbiter BW_BUDDY registers */ 1659 if (DISPLAY_VER(dev_priv) >= 12) 1660 tgl_bw_buddy_init(dev_priv); 1661 1662 /* 8. Ensure PHYs have completed calibration and adaptation */ 1663 if (IS_DG2(dev_priv)) 1664 intel_snps_phy_wait_for_calibration(dev_priv); 1665 1666 if (resume) 1667 intel_dmc_load_program(dev_priv); 1668 1669 /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */ 1670 if (DISPLAY_VER(dev_priv) >= 12) 1671 intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, 1672 DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | 1673 DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR); 1674 1675 /* Wa_14011503030:xelpd */ 1676 if (DISPLAY_VER(dev_priv) >= 13) 1677 intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0); 1678 } 1679 1680 static void icl_display_core_uninit(struct drm_i915_private *dev_priv) 1681 { 1682 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1683 struct i915_power_well *well; 1684 1685 if (!HAS_DISPLAY(dev_priv)) 1686 return; 1687 1688 gen9_disable_dc_states(dev_priv); 1689 intel_dmc_disable_program(dev_priv); 1690 1691 /* 1. Disable all display engine functions -> aready done */ 1692 1693 /* 2. Disable DBUF */ 1694 gen9_dbuf_disable(dev_priv); 1695 1696 /* 3. Disable CD clock */ 1697 intel_cdclk_uninit_hw(dev_priv); 1698 1699 if (DISPLAY_VER(dev_priv) == 14) 1700 intel_de_rmw(dev_priv, DC_STATE_EN, 0, 1701 HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH); 1702 1703 /* 1704 * 4. Disable Power Well 1 (PG1). 1705 * The AUX IO power wells are toggled on demand, so they are already 1706 * disabled at this point. 1707 */ 1708 mutex_lock(&power_domains->lock); 1709 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 1710 intel_power_well_disable(dev_priv, well); 1711 mutex_unlock(&power_domains->lock); 1712 1713 /* 5. */ 1714 intel_combo_phy_uninit(dev_priv); 1715 } 1716 1717 static void chv_phy_control_init(struct drm_i915_private *dev_priv) 1718 { 1719 struct i915_power_well *cmn_bc = 1720 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 1721 struct i915_power_well *cmn_d = 1722 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 1723 1724 /* 1725 * DISPLAY_PHY_CONTROL can get corrupted if read. As a 1726 * workaround never ever read DISPLAY_PHY_CONTROL, and 1727 * instead maintain a shadow copy ourselves. Use the actual 1728 * power well state and lane status to reconstruct the 1729 * expected initial value. 1730 */ 1731 dev_priv->display.power.chv_phy_control = 1732 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | 1733 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | 1734 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | 1735 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | 1736 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); 1737 1738 /* 1739 * If all lanes are disabled we leave the override disabled 1740 * with all power down bits cleared to match the state we 1741 * would use after disabling the port. Otherwise enable the 1742 * override and set the lane powerdown bits accding to the 1743 * current lane status. 1744 */ 1745 if (intel_power_well_is_enabled(dev_priv, cmn_bc)) { 1746 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A)); 1747 unsigned int mask; 1748 1749 mask = status & DPLL_PORTB_READY_MASK; 1750 if (mask == 0xf) 1751 mask = 0x0; 1752 else 1753 dev_priv->display.power.chv_phy_control |= 1754 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); 1755 1756 dev_priv->display.power.chv_phy_control |= 1757 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); 1758 1759 mask = (status & DPLL_PORTC_READY_MASK) >> 4; 1760 if (mask == 0xf) 1761 mask = 0x0; 1762 else 1763 dev_priv->display.power.chv_phy_control |= 1764 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); 1765 1766 dev_priv->display.power.chv_phy_control |= 1767 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); 1768 1769 dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); 1770 1771 dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false; 1772 } else { 1773 dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true; 1774 } 1775 1776 if (intel_power_well_is_enabled(dev_priv, cmn_d)) { 1777 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS); 1778 unsigned int mask; 1779 1780 mask = status & DPLL_PORTD_READY_MASK; 1781 1782 if (mask == 0xf) 1783 mask = 0x0; 1784 else 1785 dev_priv->display.power.chv_phy_control |= 1786 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); 1787 1788 dev_priv->display.power.chv_phy_control |= 1789 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); 1790 1791 dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); 1792 1793 dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false; 1794 } else { 1795 dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true; 1796 } 1797 1798 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n", 1799 dev_priv->display.power.chv_phy_control); 1800 1801 /* Defer application of initial phy_control to enabling the powerwell */ 1802 } 1803 1804 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) 1805 { 1806 struct i915_power_well *cmn = 1807 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 1808 struct i915_power_well *disp2d = 1809 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); 1810 1811 /* If the display might be already active skip this */ 1812 if (intel_power_well_is_enabled(dev_priv, cmn) && 1813 intel_power_well_is_enabled(dev_priv, disp2d) && 1814 intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST) 1815 return; 1816 1817 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n"); 1818 1819 /* cmnlane needs DPLL registers */ 1820 intel_power_well_enable(dev_priv, disp2d); 1821 1822 /* 1823 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: 1824 * Need to assert and de-assert PHY SB reset by gating the 1825 * common lane power, then un-gating it. 1826 * Simply ungating isn't enough to reset the PHY enough to get 1827 * ports and lanes running. 1828 */ 1829 intel_power_well_disable(dev_priv, cmn); 1830 } 1831 1832 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) 1833 { 1834 bool ret; 1835 1836 vlv_punit_get(dev_priv); 1837 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; 1838 vlv_punit_put(dev_priv); 1839 1840 return ret; 1841 } 1842 1843 static void assert_ved_power_gated(struct drm_i915_private *dev_priv) 1844 { 1845 drm_WARN(&dev_priv->drm, 1846 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), 1847 "VED not power gated\n"); 1848 } 1849 1850 static void assert_isp_power_gated(struct drm_i915_private *dev_priv) 1851 { 1852 static const struct pci_device_id isp_ids[] = { 1853 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, 1854 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, 1855 {} 1856 }; 1857 1858 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) && 1859 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), 1860 "ISP not power gated\n"); 1861 } 1862 1863 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); 1864 1865 /** 1866 * intel_power_domains_init_hw - initialize hardware power domain state 1867 * @i915: i915 device instance 1868 * @resume: Called from resume code paths or not 1869 * 1870 * This function initializes the hardware power domain state and enables all 1871 * power wells belonging to the INIT power domain. Power wells in other 1872 * domains (and not in the INIT domain) are referenced or disabled by 1873 * intel_modeset_readout_hw_state(). After that the reference count of each 1874 * power well must match its HW enabled state, see 1875 * intel_power_domains_verify_state(). 1876 * 1877 * It will return with power domains disabled (to be enabled later by 1878 * intel_power_domains_enable()) and must be paired with 1879 * intel_power_domains_driver_remove(). 1880 */ 1881 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) 1882 { 1883 struct i915_power_domains *power_domains = &i915->display.power.domains; 1884 1885 power_domains->initializing = true; 1886 1887 if (DISPLAY_VER(i915) >= 11) { 1888 icl_display_core_init(i915, resume); 1889 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 1890 bxt_display_core_init(i915, resume); 1891 } else if (DISPLAY_VER(i915) == 9) { 1892 skl_display_core_init(i915, resume); 1893 } else if (IS_CHERRYVIEW(i915)) { 1894 mutex_lock(&power_domains->lock); 1895 chv_phy_control_init(i915); 1896 mutex_unlock(&power_domains->lock); 1897 assert_isp_power_gated(i915); 1898 } else if (IS_VALLEYVIEW(i915)) { 1899 mutex_lock(&power_domains->lock); 1900 vlv_cmnlane_wa(i915); 1901 mutex_unlock(&power_domains->lock); 1902 assert_ved_power_gated(i915); 1903 assert_isp_power_gated(i915); 1904 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) { 1905 hsw_assert_cdclk(i915); 1906 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 1907 } else if (IS_IVYBRIDGE(i915)) { 1908 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 1909 } 1910 1911 /* 1912 * Keep all power wells enabled for any dependent HW access during 1913 * initialization and to make sure we keep BIOS enabled display HW 1914 * resources powered until display HW readout is complete. We drop 1915 * this reference in intel_power_domains_enable(). 1916 */ 1917 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 1918 power_domains->init_wakeref = 1919 intel_display_power_get(i915, POWER_DOMAIN_INIT); 1920 1921 /* Disable power support if the user asked so. */ 1922 if (!i915->params.disable_power_well) { 1923 drm_WARN_ON(&i915->drm, power_domains->disable_wakeref); 1924 i915->display.power.domains.disable_wakeref = intel_display_power_get(i915, 1925 POWER_DOMAIN_INIT); 1926 } 1927 intel_power_domains_sync_hw(i915); 1928 1929 power_domains->initializing = false; 1930 } 1931 1932 /** 1933 * intel_power_domains_driver_remove - deinitialize hw power domain state 1934 * @i915: i915 device instance 1935 * 1936 * De-initializes the display power domain HW state. It also ensures that the 1937 * device stays powered up so that the driver can be reloaded. 1938 * 1939 * It must be called with power domains already disabled (after a call to 1940 * intel_power_domains_disable()) and must be paired with 1941 * intel_power_domains_init_hw(). 1942 */ 1943 void intel_power_domains_driver_remove(struct drm_i915_private *i915) 1944 { 1945 intel_wakeref_t wakeref __maybe_unused = 1946 fetch_and_zero(&i915->display.power.domains.init_wakeref); 1947 1948 /* Remove the refcount we took to keep power well support disabled. */ 1949 if (!i915->params.disable_power_well) 1950 intel_display_power_put(i915, POWER_DOMAIN_INIT, 1951 fetch_and_zero(&i915->display.power.domains.disable_wakeref)); 1952 1953 intel_display_power_flush_work_sync(i915); 1954 1955 intel_power_domains_verify_state(i915); 1956 1957 /* Keep the power well enabled, but cancel its rpm wakeref. */ 1958 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 1959 } 1960 1961 /** 1962 * intel_power_domains_sanitize_state - sanitize power domains state 1963 * @i915: i915 device instance 1964 * 1965 * Sanitize the power domains state during driver loading and system resume. 1966 * The function will disable all display power wells that BIOS has enabled 1967 * without a user for it (any user for a power well has taken a reference 1968 * on it by the time this function is called, after the state of all the 1969 * pipe, encoder, etc. HW resources have been sanitized). 1970 */ 1971 void intel_power_domains_sanitize_state(struct drm_i915_private *i915) 1972 { 1973 struct i915_power_domains *power_domains = &i915->display.power.domains; 1974 struct i915_power_well *power_well; 1975 1976 mutex_lock(&power_domains->lock); 1977 1978 for_each_power_well_reverse(i915, power_well) { 1979 if (power_well->desc->always_on || power_well->count || 1980 !intel_power_well_is_enabled(i915, power_well)) 1981 continue; 1982 1983 drm_dbg_kms(&i915->drm, 1984 "BIOS left unused %s power well enabled, disabling it\n", 1985 intel_power_well_name(power_well)); 1986 intel_power_well_disable(i915, power_well); 1987 } 1988 1989 mutex_unlock(&power_domains->lock); 1990 } 1991 1992 /** 1993 * intel_power_domains_enable - enable toggling of display power wells 1994 * @i915: i915 device instance 1995 * 1996 * Enable the ondemand enabling/disabling of the display power wells. Note that 1997 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled 1998 * only at specific points of the display modeset sequence, thus they are not 1999 * affected by the intel_power_domains_enable()/disable() calls. The purpose 2000 * of these function is to keep the rest of power wells enabled until the end 2001 * of display HW readout (which will acquire the power references reflecting 2002 * the current HW state). 2003 */ 2004 void intel_power_domains_enable(struct drm_i915_private *i915) 2005 { 2006 intel_wakeref_t wakeref __maybe_unused = 2007 fetch_and_zero(&i915->display.power.domains.init_wakeref); 2008 2009 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 2010 intel_power_domains_verify_state(i915); 2011 } 2012 2013 /** 2014 * intel_power_domains_disable - disable toggling of display power wells 2015 * @i915: i915 device instance 2016 * 2017 * Disable the ondemand enabling/disabling of the display power wells. See 2018 * intel_power_domains_enable() for which power wells this call controls. 2019 */ 2020 void intel_power_domains_disable(struct drm_i915_private *i915) 2021 { 2022 struct i915_power_domains *power_domains = &i915->display.power.domains; 2023 2024 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 2025 power_domains->init_wakeref = 2026 intel_display_power_get(i915, POWER_DOMAIN_INIT); 2027 2028 intel_power_domains_verify_state(i915); 2029 } 2030 2031 /** 2032 * intel_power_domains_suspend - suspend power domain state 2033 * @i915: i915 device instance 2034 * @s2idle: specifies whether we go to idle, or deeper sleep 2035 * 2036 * This function prepares the hardware power domain state before entering 2037 * system suspend. 2038 * 2039 * It must be called with power domains already disabled (after a call to 2040 * intel_power_domains_disable()) and paired with intel_power_domains_resume(). 2041 */ 2042 void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle) 2043 { 2044 struct i915_power_domains *power_domains = &i915->display.power.domains; 2045 intel_wakeref_t wakeref __maybe_unused = 2046 fetch_and_zero(&power_domains->init_wakeref); 2047 2048 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 2049 2050 /* 2051 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 2052 * support don't manually deinit the power domains. This also means the 2053 * DMC firmware will stay active, it will power down any HW 2054 * resources as required and also enable deeper system power states 2055 * that would be blocked if the firmware was inactive. 2056 */ 2057 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle && 2058 intel_dmc_has_payload(i915)) { 2059 intel_display_power_flush_work(i915); 2060 intel_power_domains_verify_state(i915); 2061 return; 2062 } 2063 2064 /* 2065 * Even if power well support was disabled we still want to disable 2066 * power wells if power domains must be deinitialized for suspend. 2067 */ 2068 if (!i915->params.disable_power_well) 2069 intel_display_power_put(i915, POWER_DOMAIN_INIT, 2070 fetch_and_zero(&i915->display.power.domains.disable_wakeref)); 2071 2072 intel_display_power_flush_work(i915); 2073 intel_power_domains_verify_state(i915); 2074 2075 if (DISPLAY_VER(i915) >= 11) 2076 icl_display_core_uninit(i915); 2077 else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) 2078 bxt_display_core_uninit(i915); 2079 else if (DISPLAY_VER(i915) == 9) 2080 skl_display_core_uninit(i915); 2081 2082 power_domains->display_core_suspended = true; 2083 } 2084 2085 /** 2086 * intel_power_domains_resume - resume power domain state 2087 * @i915: i915 device instance 2088 * 2089 * This function resume the hardware power domain state during system resume. 2090 * 2091 * It will return with power domain support disabled (to be enabled later by 2092 * intel_power_domains_enable()) and must be paired with 2093 * intel_power_domains_suspend(). 2094 */ 2095 void intel_power_domains_resume(struct drm_i915_private *i915) 2096 { 2097 struct i915_power_domains *power_domains = &i915->display.power.domains; 2098 2099 if (power_domains->display_core_suspended) { 2100 intel_power_domains_init_hw(i915, true); 2101 power_domains->display_core_suspended = false; 2102 } else { 2103 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 2104 power_domains->init_wakeref = 2105 intel_display_power_get(i915, POWER_DOMAIN_INIT); 2106 } 2107 2108 intel_power_domains_verify_state(i915); 2109 } 2110 2111 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2112 2113 static void intel_power_domains_dump_info(struct drm_i915_private *i915) 2114 { 2115 struct i915_power_domains *power_domains = &i915->display.power.domains; 2116 struct i915_power_well *power_well; 2117 2118 for_each_power_well(i915, power_well) { 2119 enum intel_display_power_domain domain; 2120 2121 drm_dbg(&i915->drm, "%-25s %d\n", 2122 intel_power_well_name(power_well), intel_power_well_refcount(power_well)); 2123 2124 for_each_power_domain(domain, intel_power_well_domains(power_well)) 2125 drm_dbg(&i915->drm, " %-23s %d\n", 2126 intel_display_power_domain_str(domain), 2127 power_domains->domain_use_count[domain]); 2128 } 2129 } 2130 2131 /** 2132 * intel_power_domains_verify_state - verify the HW/SW state for all power wells 2133 * @i915: i915 device instance 2134 * 2135 * Verify if the reference count of each power well matches its HW enabled 2136 * state and the total refcount of the domains it belongs to. This must be 2137 * called after modeset HW state sanitization, which is responsible for 2138 * acquiring reference counts for any power wells in use and disabling the 2139 * ones left on by BIOS but not required by any active output. 2140 */ 2141 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 2142 { 2143 struct i915_power_domains *power_domains = &i915->display.power.domains; 2144 struct i915_power_well *power_well; 2145 bool dump_domain_info; 2146 2147 mutex_lock(&power_domains->lock); 2148 2149 verify_async_put_domains_state(power_domains); 2150 2151 dump_domain_info = false; 2152 for_each_power_well(i915, power_well) { 2153 enum intel_display_power_domain domain; 2154 int domains_count; 2155 bool enabled; 2156 2157 enabled = intel_power_well_is_enabled(i915, power_well); 2158 if ((intel_power_well_refcount(power_well) || 2159 intel_power_well_is_always_on(power_well)) != 2160 enabled) 2161 drm_err(&i915->drm, 2162 "power well %s state mismatch (refcount %d/enabled %d)", 2163 intel_power_well_name(power_well), 2164 intel_power_well_refcount(power_well), enabled); 2165 2166 domains_count = 0; 2167 for_each_power_domain(domain, intel_power_well_domains(power_well)) 2168 domains_count += power_domains->domain_use_count[domain]; 2169 2170 if (intel_power_well_refcount(power_well) != domains_count) { 2171 drm_err(&i915->drm, 2172 "power well %s refcount/domain refcount mismatch " 2173 "(refcount %d/domains refcount %d)\n", 2174 intel_power_well_name(power_well), 2175 intel_power_well_refcount(power_well), 2176 domains_count); 2177 dump_domain_info = true; 2178 } 2179 } 2180 2181 if (dump_domain_info) { 2182 static bool dumped; 2183 2184 if (!dumped) { 2185 intel_power_domains_dump_info(i915); 2186 dumped = true; 2187 } 2188 } 2189 2190 mutex_unlock(&power_domains->lock); 2191 } 2192 2193 #else 2194 2195 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 2196 { 2197 } 2198 2199 #endif 2200 2201 void intel_display_power_suspend_late(struct drm_i915_private *i915) 2202 { 2203 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || 2204 IS_BROXTON(i915)) { 2205 bxt_enable_dc9(i915); 2206 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2207 hsw_enable_pc8(i915); 2208 } 2209 2210 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ 2211 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) 2212 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); 2213 } 2214 2215 void intel_display_power_resume_early(struct drm_i915_private *i915) 2216 { 2217 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || 2218 IS_BROXTON(i915)) { 2219 gen9_sanitize_dc_state(i915); 2220 bxt_disable_dc9(i915); 2221 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2222 hsw_disable_pc8(i915); 2223 } 2224 2225 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ 2226 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) 2227 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); 2228 } 2229 2230 void intel_display_power_suspend(struct drm_i915_private *i915) 2231 { 2232 if (DISPLAY_VER(i915) >= 11) { 2233 icl_display_core_uninit(i915); 2234 bxt_enable_dc9(i915); 2235 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 2236 bxt_display_core_uninit(i915); 2237 bxt_enable_dc9(i915); 2238 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2239 hsw_enable_pc8(i915); 2240 } 2241 } 2242 2243 void intel_display_power_resume(struct drm_i915_private *i915) 2244 { 2245 struct i915_power_domains *power_domains = &i915->display.power.domains; 2246 2247 if (DISPLAY_VER(i915) >= 11) { 2248 bxt_disable_dc9(i915); 2249 icl_display_core_init(i915, true); 2250 if (intel_dmc_has_payload(i915)) { 2251 if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6) 2252 skl_enable_dc6(i915); 2253 else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5) 2254 gen9_enable_dc5(i915); 2255 } 2256 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 2257 bxt_disable_dc9(i915); 2258 bxt_display_core_init(i915, true); 2259 if (intel_dmc_has_payload(i915) && 2260 (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 2261 gen9_enable_dc5(i915); 2262 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2263 hsw_disable_pc8(i915); 2264 } 2265 } 2266 2267 void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m) 2268 { 2269 struct i915_power_domains *power_domains = &i915->display.power.domains; 2270 int i; 2271 2272 mutex_lock(&power_domains->lock); 2273 2274 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2275 for (i = 0; i < power_domains->power_well_count; i++) { 2276 struct i915_power_well *power_well; 2277 enum intel_display_power_domain power_domain; 2278 2279 power_well = &power_domains->power_wells[i]; 2280 seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well), 2281 intel_power_well_refcount(power_well)); 2282 2283 for_each_power_domain(power_domain, intel_power_well_domains(power_well)) 2284 seq_printf(m, " %-23s %d\n", 2285 intel_display_power_domain_str(power_domain), 2286 power_domains->domain_use_count[power_domain]); 2287 } 2288 2289 mutex_unlock(&power_domains->lock); 2290 } 2291 2292 struct intel_ddi_port_domains { 2293 enum port port_start; 2294 enum port port_end; 2295 enum aux_ch aux_ch_start; 2296 enum aux_ch aux_ch_end; 2297 2298 enum intel_display_power_domain ddi_lanes; 2299 enum intel_display_power_domain ddi_io; 2300 enum intel_display_power_domain aux_io; 2301 enum intel_display_power_domain aux_legacy_usbc; 2302 enum intel_display_power_domain aux_tbt; 2303 }; 2304 2305 static const struct intel_ddi_port_domains 2306 i9xx_port_domains[] = { 2307 { 2308 .port_start = PORT_A, 2309 .port_end = PORT_F, 2310 .aux_ch_start = AUX_CH_A, 2311 .aux_ch_end = AUX_CH_F, 2312 2313 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2314 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2315 .aux_io = POWER_DOMAIN_AUX_IO_A, 2316 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2317 .aux_tbt = POWER_DOMAIN_INVALID, 2318 }, 2319 }; 2320 2321 static const struct intel_ddi_port_domains 2322 d11_port_domains[] = { 2323 { 2324 .port_start = PORT_A, 2325 .port_end = PORT_B, 2326 .aux_ch_start = AUX_CH_A, 2327 .aux_ch_end = AUX_CH_B, 2328 2329 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2330 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2331 .aux_io = POWER_DOMAIN_AUX_IO_A, 2332 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2333 .aux_tbt = POWER_DOMAIN_INVALID, 2334 }, { 2335 .port_start = PORT_C, 2336 .port_end = PORT_F, 2337 .aux_ch_start = AUX_CH_C, 2338 .aux_ch_end = AUX_CH_F, 2339 2340 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C, 2341 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C, 2342 .aux_io = POWER_DOMAIN_AUX_IO_C, 2343 .aux_legacy_usbc = POWER_DOMAIN_AUX_C, 2344 .aux_tbt = POWER_DOMAIN_AUX_TBT1, 2345 }, 2346 }; 2347 2348 static const struct intel_ddi_port_domains 2349 d12_port_domains[] = { 2350 { 2351 .port_start = PORT_A, 2352 .port_end = PORT_C, 2353 .aux_ch_start = AUX_CH_A, 2354 .aux_ch_end = AUX_CH_C, 2355 2356 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2357 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2358 .aux_io = POWER_DOMAIN_AUX_IO_A, 2359 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2360 .aux_tbt = POWER_DOMAIN_INVALID, 2361 }, { 2362 .port_start = PORT_TC1, 2363 .port_end = PORT_TC6, 2364 .aux_ch_start = AUX_CH_USBC1, 2365 .aux_ch_end = AUX_CH_USBC6, 2366 2367 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, 2368 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, 2369 .aux_io = POWER_DOMAIN_INVALID, 2370 .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, 2371 .aux_tbt = POWER_DOMAIN_AUX_TBT1, 2372 }, 2373 }; 2374 2375 static const struct intel_ddi_port_domains 2376 d13_port_domains[] = { 2377 { 2378 .port_start = PORT_A, 2379 .port_end = PORT_C, 2380 .aux_ch_start = AUX_CH_A, 2381 .aux_ch_end = AUX_CH_C, 2382 2383 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2384 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2385 .aux_io = POWER_DOMAIN_AUX_IO_A, 2386 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2387 .aux_tbt = POWER_DOMAIN_INVALID, 2388 }, { 2389 .port_start = PORT_TC1, 2390 .port_end = PORT_TC4, 2391 .aux_ch_start = AUX_CH_USBC1, 2392 .aux_ch_end = AUX_CH_USBC4, 2393 2394 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, 2395 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, 2396 .aux_io = POWER_DOMAIN_INVALID, 2397 .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, 2398 .aux_tbt = POWER_DOMAIN_AUX_TBT1, 2399 }, { 2400 .port_start = PORT_D_XELPD, 2401 .port_end = PORT_E_XELPD, 2402 .aux_ch_start = AUX_CH_D_XELPD, 2403 .aux_ch_end = AUX_CH_E_XELPD, 2404 2405 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D, 2406 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D, 2407 .aux_io = POWER_DOMAIN_AUX_IO_D, 2408 .aux_legacy_usbc = POWER_DOMAIN_AUX_D, 2409 .aux_tbt = POWER_DOMAIN_INVALID, 2410 }, 2411 }; 2412 2413 static void 2414 intel_port_domains_for_platform(struct drm_i915_private *i915, 2415 const struct intel_ddi_port_domains **domains, 2416 int *domains_size) 2417 { 2418 if (DISPLAY_VER(i915) >= 13) { 2419 *domains = d13_port_domains; 2420 *domains_size = ARRAY_SIZE(d13_port_domains); 2421 } else if (DISPLAY_VER(i915) >= 12) { 2422 *domains = d12_port_domains; 2423 *domains_size = ARRAY_SIZE(d12_port_domains); 2424 } else if (DISPLAY_VER(i915) >= 11) { 2425 *domains = d11_port_domains; 2426 *domains_size = ARRAY_SIZE(d11_port_domains); 2427 } else { 2428 *domains = i9xx_port_domains; 2429 *domains_size = ARRAY_SIZE(i9xx_port_domains); 2430 } 2431 } 2432 2433 static const struct intel_ddi_port_domains * 2434 intel_port_domains_for_port(struct drm_i915_private *i915, enum port port) 2435 { 2436 const struct intel_ddi_port_domains *domains; 2437 int domains_size; 2438 int i; 2439 2440 intel_port_domains_for_platform(i915, &domains, &domains_size); 2441 for (i = 0; i < domains_size; i++) 2442 if (port >= domains[i].port_start && port <= domains[i].port_end) 2443 return &domains[i]; 2444 2445 return NULL; 2446 } 2447 2448 enum intel_display_power_domain 2449 intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port) 2450 { 2451 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); 2452 2453 if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID)) 2454 return POWER_DOMAIN_PORT_DDI_IO_A; 2455 2456 return domains->ddi_io + (int)(port - domains->port_start); 2457 } 2458 2459 enum intel_display_power_domain 2460 intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port) 2461 { 2462 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); 2463 2464 if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID)) 2465 return POWER_DOMAIN_PORT_DDI_LANES_A; 2466 2467 return domains->ddi_lanes + (int)(port - domains->port_start); 2468 } 2469 2470 static const struct intel_ddi_port_domains * 2471 intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch) 2472 { 2473 const struct intel_ddi_port_domains *domains; 2474 int domains_size; 2475 int i; 2476 2477 intel_port_domains_for_platform(i915, &domains, &domains_size); 2478 for (i = 0; i < domains_size; i++) 2479 if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end) 2480 return &domains[i]; 2481 2482 return NULL; 2483 } 2484 2485 enum intel_display_power_domain 2486 intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) 2487 { 2488 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); 2489 2490 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID)) 2491 return POWER_DOMAIN_AUX_IO_A; 2492 2493 return domains->aux_io + (int)(aux_ch - domains->aux_ch_start); 2494 } 2495 2496 enum intel_display_power_domain 2497 intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) 2498 { 2499 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); 2500 2501 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)) 2502 return POWER_DOMAIN_AUX_A; 2503 2504 return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start); 2505 } 2506 2507 enum intel_display_power_domain 2508 intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) 2509 { 2510 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); 2511 2512 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID)) 2513 return POWER_DOMAIN_AUX_TBT1; 2514 2515 return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start); 2516 } 2517