/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_snps_phy.h"
#include "skl_watermark.h"
#include "vlv_sideband.h"

#define for_each_power_domain_well(__dev_priv, __power_well, __domain)	\
	for_each_power_well(__dev_priv, __power_well)			\
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
	for_each_power_well_reverse(__dev_priv, __power_well)		\
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
		return "PIPE_PANEL_FITTER_A";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
		return "PIPE_PANEL_FITTER_B";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
		return "PIPE_PANEL_FITTER_C";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
		return "PIPE_PANEL_FITTER_D";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_PORT_DDI_LANES_A:
		return "PORT_DDI_LANES_A";
	case POWER_DOMAIN_PORT_DDI_LANES_B:
		return "PORT_DDI_LANES_B";
	case POWER_DOMAIN_PORT_DDI_LANES_C:
		return "PORT_DDI_LANES_C";
	case POWER_DOMAIN_PORT_DDI_LANES_D:
		return "PORT_DDI_LANES_D";
	case POWER_DOMAIN_PORT_DDI_LANES_E:
		return "PORT_DDI_LANES_E";
	case POWER_DOMAIN_PORT_DDI_LANES_F:
		return "PORT_DDI_LANES_F";
	case POWER_DOMAIN_PORT_DDI_LANES_TC1:
		return "PORT_DDI_LANES_TC1";
	case POWER_DOMAIN_PORT_DDI_LANES_TC2:
		return "PORT_DDI_LANES_TC2";
	case POWER_DOMAIN_PORT_DDI_LANES_TC3:
		return "PORT_DDI_LANES_TC3";
	case POWER_DOMAIN_PORT_DDI_LANES_TC4:
		return "PORT_DDI_LANES_TC4";
	case POWER_DOMAIN_PORT_DDI_LANES_TC5:
		return "PORT_DDI_LANES_TC5";
	case POWER_DOMAIN_PORT_DDI_LANES_TC6:
		return "PORT_DDI_LANES_TC6";
	case POWER_DOMAIN_PORT_DDI_IO_A:
		return "PORT_DDI_IO_A";
	case POWER_DOMAIN_PORT_DDI_IO_B:
		return "PORT_DDI_IO_B";
	case POWER_DOMAIN_PORT_DDI_IO_C:
		return "PORT_DDI_IO_C";
	case POWER_DOMAIN_PORT_DDI_IO_D:
		return "PORT_DDI_IO_D";
	case POWER_DOMAIN_PORT_DDI_IO_E:
		return "PORT_DDI_IO_E";
	case POWER_DOMAIN_PORT_DDI_IO_F:
		return "PORT_DDI_IO_F";
	case POWER_DOMAIN_PORT_DDI_IO_TC1:
		return "PORT_DDI_IO_TC1";
	case POWER_DOMAIN_PORT_DDI_IO_TC2:
		return "PORT_DDI_IO_TC2";
	case POWER_DOMAIN_PORT_DDI_IO_TC3:
		return "PORT_DDI_IO_TC3";
	case POWER_DOMAIN_PORT_DDI_IO_TC4:
		return "PORT_DDI_IO_TC4";
	case POWER_DOMAIN_PORT_DDI_IO_TC5:
		return "PORT_DDI_IO_TC5";
	case POWER_DOMAIN_PORT_DDI_IO_TC6:
		return "PORT_DDI_IO_TC6";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO_MMIO:
		return "AUDIO_MMIO";
	case POWER_DOMAIN_AUDIO_PLAYBACK:
		return "AUDIO_PLAYBACK";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_IO_B:
		return "AUX_IO_B";
	case POWER_DOMAIN_AUX_IO_C:
		return "AUX_IO_C";
	case POWER_DOMAIN_AUX_IO_D:
		return "AUX_IO_D";
	case POWER_DOMAIN_AUX_IO_E:
		return "AUX_IO_E";
	case POWER_DOMAIN_AUX_IO_F:
		return "AUX_IO_F";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_USBC1:
		return "AUX_USBC1";
	case POWER_DOMAIN_AUX_USBC2:
		return "AUX_USBC2";
	case POWER_DOMAIN_AUX_USBC3:
		return "AUX_USBC3";
	case POWER_DOMAIN_AUX_USBC4:
		return "AUX_USBC4";
	case POWER_DOMAIN_AUX_USBC5:
		return "AUX_USBC5";
	case POWER_DOMAIN_AUX_USBC6:
		return "AUX_USBC6";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_AUX_TBT5:
		return "AUX_TBT5";
	case POWER_DOMAIN_AUX_TBT6:
		return "AUX_TBT6";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DC_OFF:
		return "DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
		if (intel_power_well_is_always_on(power_well))
			continue;

		if (!intel_power_well_is_enabled_cached(power_well)) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->display.power.domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

static u32
sanitize_target_dc_state(struct drm_i915_private *i915,
			 u32 target_dc_state)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	static const u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (power_domains->allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}

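/*
 * Illustrative example (not part of the driver): with an allowed_dc_mask that
 * only contains DC_STATE_EN_UPTO_DC5, sanitize_target_dc_state() walks the
 * states[] table above and demotes the request one step at a time:
 *
 *	sanitize_target_dc_state(i915, DC_STATE_EN_UPTO_DC6);
 *	  -> DC_STATE_EN_UPTO_DC6 is not in allowed_dc_mask, fall back to DC5
 *	  -> DC_STATE_EN_UPTO_DC5 is allowed, return it
 *
 * An unsupported request therefore degrades along
 * DC6 -> DC5 -> DC3CO -> DC_STATE_DISABLE rather than being rejected outright.
 */
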
/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state.
 * Based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == power_domains->target_dc_state)
		goto unlock;

	dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
	/*
	 * If DC off power well is disabled, need to enable and disable the
	 * DC off power well to effect target DC state.
	 */
	if (!dc_off_enabled)
		intel_power_well_enable(dev_priv, power_well);

	power_domains->target_dc_state = state;

	if (!dc_off_enabled)
		intel_power_well_disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

static void __async_put_domains_mask(struct i915_power_domains *power_domains,
				     struct intel_power_domain_mask *mask)
{
	bitmap_or(mask->bits,
		  power_domains->async_put_domains[0].bits,
		  power_domains->async_put_domains[1].bits,
		  POWER_DOMAIN_NUM);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);

	return !drm_WARN_ON(&i915->drm,
			    bitmap_intersects(power_domains->async_put_domains[0].bits,
					      power_domains->async_put_domains[1].bits,
					      POWER_DOMAIN_NUM));
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);
	struct intel_power_domain_mask async_put_mask;
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	__async_put_domains_mask(power_domains, &async_put_mask);
	err |= drm_WARN_ON(&i915->drm,
			   !!power_domains->async_put_wakeref !=
			   !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, &async_put_mask)
		err |= drm_WARN_ON(&i915->drm,
				   power_domains->domain_use_count[domain] != 1);

	return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);
	enum intel_display_power_domain domain;

	drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
	for_each_power_domain(domain, mask)
		drm_dbg(&i915->drm, "%s use_count %d\n",
			intel_display_power_domain_str(domain),
			power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);

	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
		power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    &power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    &power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */

static void
async_put_domains_mask(struct i915_power_domains *power_domains,
		       struct intel_power_domain_mask *mask)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	__async_put_domains_mask(power_domains, mask);
}

static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	clear_bit(domain, power_domains->async_put_domains[0].bits);
	clear_bit(domain, power_domains->async_put_domains[1].bits);
}

static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct intel_power_domain_mask async_put_mask;
	bool ret = false;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!test_bit(domain, async_put_mask.bits))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, domain)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}

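/*
 * Illustrative usage sketch (not part of the driver): a caller that needs a
 * display hardware block powered up takes a reference on the innermost
 * domain it requires, does its accesses, and then releases the reference
 * with the wakeref it got back:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_MMIO);
 *	... access the hardware block ...
 *	intel_display_power_put(i915, POWER_DOMAIN_AUDIO_MMIO, wakeref);
 */
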
/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}

static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);
	struct intel_power_domain_mask async_put_mask;

	power_domains = &dev_priv->display.power.domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	async_put_domains_mask(power_domains, &async_put_mask);
	drm_WARN(&dev_priv->drm,
		 test_bit(domain, async_put_mask.bits),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain)
		intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}

static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);
	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
						    &power_domains->async_put_work,
						    msecs_to_jiffies(100)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains,
			  struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     display.power.domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must already hold a raw wakeref; upgrade that to a proper
	 * wakeref to make the state checker happy about the HW access during
	 * power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.power.domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  &power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
		bitmap_copy(power_domains->async_put_domains[0].bits,
			    power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		bitmap_zero(power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	} else {
		/*
		 * Cancel the work that got queued after this one got dequeued,
		 * since here we released the corresponding async-put reference.
		 */
		cancel_delayed_work(&power_domains->async_put_work);
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		set_bit(domain, power_domains->async_put_domains[1].bits);
	} else {
		set_bit(domain, power_domains->async_put_domains[0].bits);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_power_domain_mask async_put_mask;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	async_put_domains_mask(power_domains, &async_put_mask);
	release_async_put_domains(power_domains, &async_put_mask);
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}

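/*
 * Illustrative usage sketch (not part of the driver): a caller that wants to
 * avoid the latency of powering a well down in its own context can drop its
 * reference asynchronously, and a path that must not race with the delayed
 * work (for example teardown) can flush it afterwards:
 *
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 *	...
 *	intel_display_power_flush_work(i915);
 */
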
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress wakeref
 * tracking when the corresponding debug kconfig option is disabled, and should not
 * be used otherwise.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif

void
intel_display_power_get_in_set(struct drm_i915_private *i915,
			       struct intel_display_power_domain_set *power_domain_set,
			       enum intel_display_power_domain domain)
{
	intel_wakeref_t __maybe_unused wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);
}

bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
					  struct intel_display_power_domain_set *power_domain_set,
					  enum intel_display_power_domain domain)
{
	intel_wakeref_t wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get_if_enabled(i915, domain);
	if (!wf)
		return false;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);

	return true;
}

void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
				    struct intel_display_power_domain_set *power_domain_set,
				    struct intel_power_domain_mask *mask)
{
	enum intel_display_power_domain domain;

	drm_WARN_ON(&i915->drm,
		    !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, mask) {
		intel_wakeref_t __maybe_unused wf = -1;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
		intel_display_power_put(i915, domain, wf);
		clear_bit(domain, power_domain_set->mask.bits);
	}
}

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	if (IS_DG2(dev_priv))
		max_dc = 1;
	else if (IS_DG1(dev_priv))
		max_dc = 3;
	else if (DISPLAY_VER(dev_priv) >= 12)
		max_dc = 4;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		max_dc = 1;
	else if (DISPLAY_VER(dev_priv) >= 9)
		max_dc = 2;
	else
		max_dc = 0;

	/*
	 * DC9 has a separate HW flow from the rest of the DC states,
	 * not depending on the DMC firmware. It's needed by system
	 * suspend/resume, so allow it unconditionally.
	 */
	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
		DISPLAY_VER(dev_priv) >= 11 ?
	       DC_STATE_EN_DC9 : 0;

	if (!dev_priv->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	dev_priv->params.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   dev_priv->params.disable_power_well);
	power_domains->allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

	power_domains->target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	return intel_display_power_map_init(power_domains);
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	intel_display_power_map_cleanup(&dev_priv->display.power.domains);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well)
		intel_power_well_sync_hw(dev_priv, power_well);
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, str_enable_disable(enable));
}

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * Might be running this in parallel to gen9_dc_off_power_well_enable
	 * being called from intel_dp_detect for instance,
	 * which causes assertion triggered by race condition,
	 * as gen9_assert_dbuf_enabled might preempt this when registers
	 * were already updated, while dev_priv was not.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->display.dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->display.dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
	 * Just power up at least 1 slice, we will
	 * figure out later which slices we have and what we need.
	 */
	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
				dev_priv->display.dbuf.enabled_slices);
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	gen9_dbuf_slices_update(dev_priv, 0);
}

static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
	enum dbuf_slice slice;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	for_each_dbuf_slice(dev_priv, slice)
		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
			     DBUF_TRACKER_STATE_SERVICE_MASK,
			     DBUF_TRACKER_STATE_SERVICE(8));
}

static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
	u32 mask, val, i;

	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
		MBUS_ABOX_B_CREDIT_MASK |
		MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
		MBUS_ABOX_BT_CREDIT_POOL2(16) |
		MBUS_ABOX_B_CREDIT(1) |
		MBUS_ABOX_BW_CREDIT(1);

	/*
	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
	 * expect us to program the abox_ctl0 register as well, even though
	 * we don't have to program other instance-0 registers like BW_BUDDY.
	 */
	if (DISPLAY_VER(dev_priv) == 12)
		abox_regs |= BIT(0);

	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}

static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong. Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}

static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, D_COMP_HSW);
	else
		return intel_de_read(dev_priv, D_COMP_BDW);
}

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
	if (IS_HASWELL(dev_priv)) {
		if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to write to D_COMP\n");
	} else {
		intel_de_write(dev_priv, D_COMP_BDW, val);
		intel_de_posting_read(dev_priv, D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv))
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
			     PCH_LP_PARTITION_LEVEL_DISABLE, 0);

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}

static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv))
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
			     0, PCH_LP_PARTITION_LEVEL_DISABLE);
}

static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	if (DISPLAY_VER(dev_priv) >= 14)
		reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;

	intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0);
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}

static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};

static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
	int config, i;

	/* BW_BUDDY registers are not used on dgpu's beyond DG1 */
	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv) ||
	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
		/* Wa_1409767108 */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
			if (DISPLAY_VER(dev_priv) == 12)
				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
					     BW_BUDDY_TLB_REQ_TIMER_MASK,
					     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}

static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	if (DISPLAY_VER(dev_priv) == 14)
		intel_de_rmw(dev_priv, DC_STATE_EN,
			     HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 12)
		gen12_dbuf_slices_config(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (IS_DG2(dev_priv))
		intel_snps_phy_wait_for_calibration(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
			     DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
			     DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(dev_priv) >= 13)
		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}

static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	intel_dmc_disable_program(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) == 14)
		intel_de_rmw(dev_priv, DC_STATE_EN, 0,
			     HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->display.power.chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
1730 */ 1731 if (intel_power_well_is_enabled(dev_priv, cmn_bc)) { 1732 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A)); 1733 unsigned int mask; 1734 1735 mask = status & DPLL_PORTB_READY_MASK; 1736 if (mask == 0xf) 1737 mask = 0x0; 1738 else 1739 dev_priv->display.power.chv_phy_control |= 1740 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); 1741 1742 dev_priv->display.power.chv_phy_control |= 1743 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); 1744 1745 mask = (status & DPLL_PORTC_READY_MASK) >> 4; 1746 if (mask == 0xf) 1747 mask = 0x0; 1748 else 1749 dev_priv->display.power.chv_phy_control |= 1750 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); 1751 1752 dev_priv->display.power.chv_phy_control |= 1753 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); 1754 1755 dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); 1756 1757 dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false; 1758 } else { 1759 dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true; 1760 } 1761 1762 if (intel_power_well_is_enabled(dev_priv, cmn_d)) { 1763 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS); 1764 unsigned int mask; 1765 1766 mask = status & DPLL_PORTD_READY_MASK; 1767 1768 if (mask == 0xf) 1769 mask = 0x0; 1770 else 1771 dev_priv->display.power.chv_phy_control |= 1772 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); 1773 1774 dev_priv->display.power.chv_phy_control |= 1775 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); 1776 1777 dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); 1778 1779 dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false; 1780 } else { 1781 dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true; 1782 } 1783 1784 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n", 1785 dev_priv->display.power.chv_phy_control); 1786 1787 /* Defer application of initial phy_control to enabling the powerwell */ 1788 } 1789 1790 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) 1791 { 1792 struct i915_power_well *cmn = 1793 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 1794 struct i915_power_well *disp2d = 1795 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); 1796 1797 /* If the display might be already active skip this */ 1798 if (intel_power_well_is_enabled(dev_priv, cmn) && 1799 intel_power_well_is_enabled(dev_priv, disp2d) && 1800 intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST) 1801 return; 1802 1803 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n"); 1804 1805 /* cmnlane needs DPLL registers */ 1806 intel_power_well_enable(dev_priv, disp2d); 1807 1808 /* 1809 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: 1810 * Need to assert and de-assert PHY SB reset by gating the 1811 * common lane power, then un-gating it. 1812 * Simply ungating isn't enough to reset the PHY enough to get 1813 * ports and lanes running. 
1814 */ 1815 intel_power_well_disable(dev_priv, cmn); 1816 } 1817 1818 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) 1819 { 1820 bool ret; 1821 1822 vlv_punit_get(dev_priv); 1823 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; 1824 vlv_punit_put(dev_priv); 1825 1826 return ret; 1827 } 1828 1829 static void assert_ved_power_gated(struct drm_i915_private *dev_priv) 1830 { 1831 drm_WARN(&dev_priv->drm, 1832 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), 1833 "VED not power gated\n"); 1834 } 1835 1836 static void assert_isp_power_gated(struct drm_i915_private *dev_priv) 1837 { 1838 static const struct pci_device_id isp_ids[] = { 1839 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, 1840 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, 1841 {} 1842 }; 1843 1844 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) && 1845 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), 1846 "ISP not power gated\n"); 1847 } 1848 1849 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); 1850 1851 /** 1852 * intel_power_domains_init_hw - initialize hardware power domain state 1853 * @i915: i915 device instance 1854 * @resume: Called from resume code paths or not 1855 * 1856 * This function initializes the hardware power domain state and enables all 1857 * power wells belonging to the INIT power domain. Power wells in other 1858 * domains (and not in the INIT domain) are referenced or disabled by 1859 * intel_modeset_readout_hw_state(). After that the reference count of each 1860 * power well must match its HW enabled state, see 1861 * intel_power_domains_verify_state(). 1862 * 1863 * It will return with power domains disabled (to be enabled later by 1864 * intel_power_domains_enable()) and must be paired with 1865 * intel_power_domains_driver_remove(). 1866 */ 1867 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) 1868 { 1869 struct i915_power_domains *power_domains = &i915->display.power.domains; 1870 1871 power_domains->initializing = true; 1872 1873 if (DISPLAY_VER(i915) >= 11) { 1874 icl_display_core_init(i915, resume); 1875 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 1876 bxt_display_core_init(i915, resume); 1877 } else if (DISPLAY_VER(i915) == 9) { 1878 skl_display_core_init(i915, resume); 1879 } else if (IS_CHERRYVIEW(i915)) { 1880 mutex_lock(&power_domains->lock); 1881 chv_phy_control_init(i915); 1882 mutex_unlock(&power_domains->lock); 1883 assert_isp_power_gated(i915); 1884 } else if (IS_VALLEYVIEW(i915)) { 1885 mutex_lock(&power_domains->lock); 1886 vlv_cmnlane_wa(i915); 1887 mutex_unlock(&power_domains->lock); 1888 assert_ved_power_gated(i915); 1889 assert_isp_power_gated(i915); 1890 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) { 1891 hsw_assert_cdclk(i915); 1892 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 1893 } else if (IS_IVYBRIDGE(i915)) { 1894 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 1895 } 1896 1897 /* 1898 * Keep all power wells enabled for any dependent HW access during 1899 * initialization and to make sure we keep BIOS enabled display HW 1900 * resources powered until display HW readout is complete. We drop 1901 * this reference in intel_power_domains_enable(). 1902 */ 1903 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 1904 power_domains->init_wakeref = 1905 intel_display_power_get(i915, POWER_DOMAIN_INIT); 1906 1907 /* Disable power support if the user asked so. 
*/
1908 if (!i915->params.disable_power_well) {
1909 drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
1910 i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
1911 POWER_DOMAIN_INIT);
1912 }
1913 intel_power_domains_sync_hw(i915);
1914
1915 power_domains->initializing = false;
1916 }
1917
1918 /**
1919 * intel_power_domains_driver_remove - deinitialize hw power domain state
1920 * @i915: i915 device instance
1921 *
1922 * De-initializes the display power domain HW state. It also ensures that the
1923 * device stays powered up so that the driver can be reloaded.
1924 *
1925 * It must be called with power domains already disabled (after a call to
1926 * intel_power_domains_disable()) and must be paired with
1927 * intel_power_domains_init_hw().
1928 */
1929 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
1930 {
1931 intel_wakeref_t wakeref __maybe_unused =
1932 fetch_and_zero(&i915->display.power.domains.init_wakeref);
1933
1934 /* Remove the refcount we took to keep power well support disabled. */
1935 if (!i915->params.disable_power_well)
1936 intel_display_power_put(i915, POWER_DOMAIN_INIT,
1937 fetch_and_zero(&i915->display.power.domains.disable_wakeref));
1938
1939 intel_display_power_flush_work_sync(i915);
1940
1941 intel_power_domains_verify_state(i915);
1942
1943 /* Keep the power well enabled, but cancel its rpm wakeref. */
1944 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1945 }
1946
1947 /**
1948 * intel_power_domains_sanitize_state - sanitize power domains state
1949 * @i915: i915 device instance
1950 *
1951 * Sanitize the power domains state during driver loading and system resume.
1952 * The function will disable all display power wells that BIOS has enabled
1953 * without a user for them (any user for a power well has taken a reference
1954 * on it by the time this function is called, after the state of all the
1955 * pipe, encoder, etc. HW resources have been sanitized).
1956 */
1957 void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
1958 {
1959 struct i915_power_domains *power_domains = &i915->display.power.domains;
1960 struct i915_power_well *power_well;
1961
1962 mutex_lock(&power_domains->lock);
1963
1964 for_each_power_well_reverse(i915, power_well) {
1965 if (power_well->desc->always_on || power_well->count ||
1966 !intel_power_well_is_enabled(i915, power_well))
1967 continue;
1968
1969 drm_dbg_kms(&i915->drm,
1970 "BIOS left unused %s power well enabled, disabling it\n",
1971 intel_power_well_name(power_well));
1972 intel_power_well_disable(i915, power_well);
1973 }
1974
1975 mutex_unlock(&power_domains->lock);
1976 }
1977
1978 /**
1979 * intel_power_domains_enable - enable toggling of display power wells
1980 * @i915: i915 device instance
1981 *
1982 * Enable the on-demand enabling/disabling of the display power wells. Note that
1983 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
1984 * only at specific points of the display modeset sequence, thus they are not
1985 * affected by the intel_power_domains_enable()/disable() calls. The purpose
1986 * of these functions is to keep the rest of the power wells enabled until the end
1987 * of display HW readout (which will acquire the power references reflecting
1988 * the current HW state).
1989 */
1990 void intel_power_domains_enable(struct drm_i915_private *i915)
1991 {
1992 intel_wakeref_t wakeref __maybe_unused =
1993 fetch_and_zero(&i915->display.power.domains.init_wakeref);
1994
1995 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
1996 intel_power_domains_verify_state(i915);
1997 }
1998
1999 /**
2000 * intel_power_domains_disable - disable toggling of display power wells
2001 * @i915: i915 device instance
2002 *
2003 * Disable the on-demand enabling/disabling of the display power wells. See
2004 * intel_power_domains_enable() for which power wells this call controls.
2005 */
2006 void intel_power_domains_disable(struct drm_i915_private *i915)
2007 {
2008 struct i915_power_domains *power_domains = &i915->display.power.domains;
2009
2010 drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
2011 power_domains->init_wakeref =
2012 intel_display_power_get(i915, POWER_DOMAIN_INIT);
2013
2014 intel_power_domains_verify_state(i915);
2015 }
2016
2017 /**
2018 * intel_power_domains_suspend - suspend power domain state
2019 * @i915: i915 device instance
2020 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
2021 *
2022 * This function prepares the hardware power domain state before entering
2023 * system suspend.
2024 *
2025 * It must be called with power domains already disabled (after a call to
2026 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
2027 */
2028 void intel_power_domains_suspend(struct drm_i915_private *i915,
2029 enum i915_drm_suspend_mode suspend_mode)
2030 {
2031 struct i915_power_domains *power_domains = &i915->display.power.domains;
2032 intel_wakeref_t wakeref __maybe_unused =
2033 fetch_and_zero(&power_domains->init_wakeref);
2034
2035 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2036
2037 /*
2038 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
2039 * support don't manually deinit the power domains. This also means the
2040 * DMC firmware will stay active; it will power down any HW
2041 * resources as required and also enable deeper system power states
2042 * that would be blocked if the firmware was inactive.
2043 */
2044 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) &&
2045 suspend_mode == I915_DRM_SUSPEND_IDLE &&
2046 intel_dmc_has_payload(i915)) {
2047 intel_display_power_flush_work(i915);
2048 intel_power_domains_verify_state(i915);
2049 return;
2050 }
2051
2052 /*
2053 * Even if power well support was disabled we still want to disable
2054 * power wells if power domains must be deinitialized for suspend.
2055 */
2056 if (!i915->params.disable_power_well)
2057 intel_display_power_put(i915, POWER_DOMAIN_INIT,
2058 fetch_and_zero(&i915->display.power.domains.disable_wakeref));
2059
2060 intel_display_power_flush_work(i915);
2061 intel_power_domains_verify_state(i915);
2062
2063 if (DISPLAY_VER(i915) >= 11)
2064 icl_display_core_uninit(i915);
2065 else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
2066 bxt_display_core_uninit(i915);
2067 else if (DISPLAY_VER(i915) == 9)
2068 skl_display_core_uninit(i915);
2069
2070 power_domains->display_core_suspended = true;
2071 }
2072
2073 /**
2074 * intel_power_domains_resume - resume power domain state
2075 * @i915: i915 device instance
2076 *
2077 * This function resumes the hardware power domain state during system resume.
2078 * 2079 * It will return with power domain support disabled (to be enabled later by 2080 * intel_power_domains_enable()) and must be paired with 2081 * intel_power_domains_suspend(). 2082 */ 2083 void intel_power_domains_resume(struct drm_i915_private *i915) 2084 { 2085 struct i915_power_domains *power_domains = &i915->display.power.domains; 2086 2087 if (power_domains->display_core_suspended) { 2088 intel_power_domains_init_hw(i915, true); 2089 power_domains->display_core_suspended = false; 2090 } else { 2091 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 2092 power_domains->init_wakeref = 2093 intel_display_power_get(i915, POWER_DOMAIN_INIT); 2094 } 2095 2096 intel_power_domains_verify_state(i915); 2097 } 2098 2099 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2100 2101 static void intel_power_domains_dump_info(struct drm_i915_private *i915) 2102 { 2103 struct i915_power_domains *power_domains = &i915->display.power.domains; 2104 struct i915_power_well *power_well; 2105 2106 for_each_power_well(i915, power_well) { 2107 enum intel_display_power_domain domain; 2108 2109 drm_dbg(&i915->drm, "%-25s %d\n", 2110 intel_power_well_name(power_well), intel_power_well_refcount(power_well)); 2111 2112 for_each_power_domain(domain, intel_power_well_domains(power_well)) 2113 drm_dbg(&i915->drm, " %-23s %d\n", 2114 intel_display_power_domain_str(domain), 2115 power_domains->domain_use_count[domain]); 2116 } 2117 } 2118 2119 /** 2120 * intel_power_domains_verify_state - verify the HW/SW state for all power wells 2121 * @i915: i915 device instance 2122 * 2123 * Verify if the reference count of each power well matches its HW enabled 2124 * state and the total refcount of the domains it belongs to. This must be 2125 * called after modeset HW state sanitization, which is responsible for 2126 * acquiring reference counts for any power wells in use and disabling the 2127 * ones left on by BIOS but not required by any active output. 
2128 */ 2129 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 2130 { 2131 struct i915_power_domains *power_domains = &i915->display.power.domains; 2132 struct i915_power_well *power_well; 2133 bool dump_domain_info; 2134 2135 mutex_lock(&power_domains->lock); 2136 2137 verify_async_put_domains_state(power_domains); 2138 2139 dump_domain_info = false; 2140 for_each_power_well(i915, power_well) { 2141 enum intel_display_power_domain domain; 2142 int domains_count; 2143 bool enabled; 2144 2145 enabled = intel_power_well_is_enabled(i915, power_well); 2146 if ((intel_power_well_refcount(power_well) || 2147 intel_power_well_is_always_on(power_well)) != 2148 enabled) 2149 drm_err(&i915->drm, 2150 "power well %s state mismatch (refcount %d/enabled %d)", 2151 intel_power_well_name(power_well), 2152 intel_power_well_refcount(power_well), enabled); 2153 2154 domains_count = 0; 2155 for_each_power_domain(domain, intel_power_well_domains(power_well)) 2156 domains_count += power_domains->domain_use_count[domain]; 2157 2158 if (intel_power_well_refcount(power_well) != domains_count) { 2159 drm_err(&i915->drm, 2160 "power well %s refcount/domain refcount mismatch " 2161 "(refcount %d/domains refcount %d)\n", 2162 intel_power_well_name(power_well), 2163 intel_power_well_refcount(power_well), 2164 domains_count); 2165 dump_domain_info = true; 2166 } 2167 } 2168 2169 if (dump_domain_info) { 2170 static bool dumped; 2171 2172 if (!dumped) { 2173 intel_power_domains_dump_info(i915); 2174 dumped = true; 2175 } 2176 } 2177 2178 mutex_unlock(&power_domains->lock); 2179 } 2180 2181 #else 2182 2183 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 2184 { 2185 } 2186 2187 #endif 2188 2189 void intel_display_power_suspend_late(struct drm_i915_private *i915) 2190 { 2191 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || 2192 IS_BROXTON(i915)) { 2193 bxt_enable_dc9(i915); 2194 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2195 hsw_enable_pc8(i915); 2196 } 2197 2198 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ 2199 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) 2200 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); 2201 } 2202 2203 void intel_display_power_resume_early(struct drm_i915_private *i915) 2204 { 2205 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || 2206 IS_BROXTON(i915)) { 2207 gen9_sanitize_dc_state(i915); 2208 bxt_disable_dc9(i915); 2209 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2210 hsw_disable_pc8(i915); 2211 } 2212 2213 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ 2214 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) 2215 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); 2216 } 2217 2218 void intel_display_power_suspend(struct drm_i915_private *i915) 2219 { 2220 if (DISPLAY_VER(i915) >= 11) { 2221 icl_display_core_uninit(i915); 2222 bxt_enable_dc9(i915); 2223 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 2224 bxt_display_core_uninit(i915); 2225 bxt_enable_dc9(i915); 2226 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2227 hsw_enable_pc8(i915); 2228 } 2229 } 2230 2231 void intel_display_power_resume(struct drm_i915_private *i915) 2232 { 2233 struct i915_power_domains *power_domains = &i915->display.power.domains; 2234 2235 if (DISPLAY_VER(i915) >= 11) { 2236 bxt_disable_dc9(i915); 2237 icl_display_core_init(i915, true); 2238 if (intel_dmc_has_payload(i915)) { 2239 if (power_domains->allowed_dc_mask & 
DC_STATE_EN_UPTO_DC6) 2240 skl_enable_dc6(i915); 2241 else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5) 2242 gen9_enable_dc5(i915); 2243 } 2244 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 2245 bxt_disable_dc9(i915); 2246 bxt_display_core_init(i915, true); 2247 if (intel_dmc_has_payload(i915) && 2248 (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 2249 gen9_enable_dc5(i915); 2250 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2251 hsw_disable_pc8(i915); 2252 } 2253 } 2254 2255 void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m) 2256 { 2257 struct i915_power_domains *power_domains = &i915->display.power.domains; 2258 int i; 2259 2260 mutex_lock(&power_domains->lock); 2261 2262 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2263 for (i = 0; i < power_domains->power_well_count; i++) { 2264 struct i915_power_well *power_well; 2265 enum intel_display_power_domain power_domain; 2266 2267 power_well = &power_domains->power_wells[i]; 2268 seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well), 2269 intel_power_well_refcount(power_well)); 2270 2271 for_each_power_domain(power_domain, intel_power_well_domains(power_well)) 2272 seq_printf(m, " %-23s %d\n", 2273 intel_display_power_domain_str(power_domain), 2274 power_domains->domain_use_count[power_domain]); 2275 } 2276 2277 mutex_unlock(&power_domains->lock); 2278 } 2279 2280 struct intel_ddi_port_domains { 2281 enum port port_start; 2282 enum port port_end; 2283 enum aux_ch aux_ch_start; 2284 enum aux_ch aux_ch_end; 2285 2286 enum intel_display_power_domain ddi_lanes; 2287 enum intel_display_power_domain ddi_io; 2288 enum intel_display_power_domain aux_io; 2289 enum intel_display_power_domain aux_legacy_usbc; 2290 enum intel_display_power_domain aux_tbt; 2291 }; 2292 2293 static const struct intel_ddi_port_domains 2294 i9xx_port_domains[] = { 2295 { 2296 .port_start = PORT_A, 2297 .port_end = PORT_F, 2298 .aux_ch_start = AUX_CH_A, 2299 .aux_ch_end = AUX_CH_F, 2300 2301 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2302 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2303 .aux_io = POWER_DOMAIN_AUX_IO_A, 2304 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2305 .aux_tbt = POWER_DOMAIN_INVALID, 2306 }, 2307 }; 2308 2309 static const struct intel_ddi_port_domains 2310 d11_port_domains[] = { 2311 { 2312 .port_start = PORT_A, 2313 .port_end = PORT_B, 2314 .aux_ch_start = AUX_CH_A, 2315 .aux_ch_end = AUX_CH_B, 2316 2317 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2318 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2319 .aux_io = POWER_DOMAIN_AUX_IO_A, 2320 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2321 .aux_tbt = POWER_DOMAIN_INVALID, 2322 }, { 2323 .port_start = PORT_C, 2324 .port_end = PORT_F, 2325 .aux_ch_start = AUX_CH_C, 2326 .aux_ch_end = AUX_CH_F, 2327 2328 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C, 2329 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C, 2330 .aux_io = POWER_DOMAIN_AUX_IO_C, 2331 .aux_legacy_usbc = POWER_DOMAIN_AUX_C, 2332 .aux_tbt = POWER_DOMAIN_AUX_TBT1, 2333 }, 2334 }; 2335 2336 static const struct intel_ddi_port_domains 2337 d12_port_domains[] = { 2338 { 2339 .port_start = PORT_A, 2340 .port_end = PORT_C, 2341 .aux_ch_start = AUX_CH_A, 2342 .aux_ch_end = AUX_CH_C, 2343 2344 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2345 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2346 .aux_io = POWER_DOMAIN_AUX_IO_A, 2347 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2348 .aux_tbt = POWER_DOMAIN_INVALID, 2349 }, { 2350 .port_start = PORT_TC1, 2351 .port_end = PORT_TC6, 2352 .aux_ch_start = AUX_CH_USBC1, 2353 
.aux_ch_end = AUX_CH_USBC6, 2354 2355 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, 2356 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, 2357 .aux_io = POWER_DOMAIN_INVALID, 2358 .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, 2359 .aux_tbt = POWER_DOMAIN_AUX_TBT1, 2360 }, 2361 }; 2362 2363 static const struct intel_ddi_port_domains 2364 d13_port_domains[] = { 2365 { 2366 .port_start = PORT_A, 2367 .port_end = PORT_C, 2368 .aux_ch_start = AUX_CH_A, 2369 .aux_ch_end = AUX_CH_C, 2370 2371 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2372 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2373 .aux_io = POWER_DOMAIN_AUX_IO_A, 2374 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2375 .aux_tbt = POWER_DOMAIN_INVALID, 2376 }, { 2377 .port_start = PORT_TC1, 2378 .port_end = PORT_TC4, 2379 .aux_ch_start = AUX_CH_USBC1, 2380 .aux_ch_end = AUX_CH_USBC4, 2381 2382 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, 2383 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, 2384 .aux_io = POWER_DOMAIN_INVALID, 2385 .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, 2386 .aux_tbt = POWER_DOMAIN_AUX_TBT1, 2387 }, { 2388 .port_start = PORT_D_XELPD, 2389 .port_end = PORT_E_XELPD, 2390 .aux_ch_start = AUX_CH_D_XELPD, 2391 .aux_ch_end = AUX_CH_E_XELPD, 2392 2393 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D, 2394 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D, 2395 .aux_io = POWER_DOMAIN_AUX_IO_D, 2396 .aux_legacy_usbc = POWER_DOMAIN_AUX_D, 2397 .aux_tbt = POWER_DOMAIN_INVALID, 2398 }, 2399 }; 2400 2401 static void 2402 intel_port_domains_for_platform(struct drm_i915_private *i915, 2403 const struct intel_ddi_port_domains **domains, 2404 int *domains_size) 2405 { 2406 if (DISPLAY_VER(i915) >= 13) { 2407 *domains = d13_port_domains; 2408 *domains_size = ARRAY_SIZE(d13_port_domains); 2409 } else if (DISPLAY_VER(i915) >= 12) { 2410 *domains = d12_port_domains; 2411 *domains_size = ARRAY_SIZE(d12_port_domains); 2412 } else if (DISPLAY_VER(i915) >= 11) { 2413 *domains = d11_port_domains; 2414 *domains_size = ARRAY_SIZE(d11_port_domains); 2415 } else { 2416 *domains = i9xx_port_domains; 2417 *domains_size = ARRAY_SIZE(i9xx_port_domains); 2418 } 2419 } 2420 2421 static const struct intel_ddi_port_domains * 2422 intel_port_domains_for_port(struct drm_i915_private *i915, enum port port) 2423 { 2424 const struct intel_ddi_port_domains *domains; 2425 int domains_size; 2426 int i; 2427 2428 intel_port_domains_for_platform(i915, &domains, &domains_size); 2429 for (i = 0; i < domains_size; i++) 2430 if (port >= domains[i].port_start && port <= domains[i].port_end) 2431 return &domains[i]; 2432 2433 return NULL; 2434 } 2435 2436 enum intel_display_power_domain 2437 intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port) 2438 { 2439 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); 2440 2441 if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID)) 2442 return POWER_DOMAIN_PORT_DDI_IO_A; 2443 2444 return domains->ddi_io + (int)(port - domains->port_start); 2445 } 2446 2447 enum intel_display_power_domain 2448 intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port) 2449 { 2450 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); 2451 2452 if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID)) 2453 return POWER_DOMAIN_PORT_DDI_LANES_A; 2454 2455 return domains->ddi_lanes + (int)(port - domains->port_start); 2456 } 2457 2458 static const struct intel_ddi_port_domains * 2459 intel_port_domains_for_aux_ch(struct 
drm_i915_private *i915, enum aux_ch aux_ch) 2460 { 2461 const struct intel_ddi_port_domains *domains; 2462 int domains_size; 2463 int i; 2464 2465 intel_port_domains_for_platform(i915, &domains, &domains_size); 2466 for (i = 0; i < domains_size; i++) 2467 if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end) 2468 return &domains[i]; 2469 2470 return NULL; 2471 } 2472 2473 enum intel_display_power_domain 2474 intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) 2475 { 2476 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); 2477 2478 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID)) 2479 return POWER_DOMAIN_AUX_IO_A; 2480 2481 return domains->aux_io + (int)(aux_ch - domains->aux_ch_start); 2482 } 2483 2484 enum intel_display_power_domain 2485 intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) 2486 { 2487 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); 2488 2489 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)) 2490 return POWER_DOMAIN_AUX_A; 2491 2492 return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start); 2493 } 2494 2495 enum intel_display_power_domain 2496 intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) 2497 { 2498 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); 2499 2500 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID)) 2501 return POWER_DOMAIN_AUX_TBT1; 2502 2503 return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start); 2504 } 2505
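/*
 * Example usage (illustrative sketch only, not a call site in this file):
 * the helpers above translate a port or AUX channel into its power domain,
 * and a caller then brackets the hardware access with a get/put pair on that
 * domain. Assuming a valid i915 pointer and aux_ch, the pattern is roughly:
 *
 *	enum intel_display_power_domain domain =
 *		intel_display_power_legacy_aux_domain(i915, aux_ch);
 *	intel_wakeref_t wakeref = intel_display_power_get(i915, domain);
 *
 *	... perform the AUX transfer ...
 *
 *	intel_display_power_put(i915, domain, wakeref);
 */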